author    Paolo Bonzini <pbonzini@redhat.com>    2018-12-21 11:48:41 +0100
committer Paolo Bonzini <pbonzini@redhat.com>    2018-12-21 11:48:41 +0100
commit    c6ad45973380af5ef43b708a54d7475d93f86638 (patch)
tree      f28aaacfa9eafe320c8b4e57aeabbfb93d17b9ef /arch/powerpc/kvm
parent    e81434995081fd7efb755fd75576b35dbb0850b1 (diff)
parent    ae59a7e1945bc2245bbb587071ca737e00daf443 (diff)
Merge tag 'kvm-ppc-next-4.21-2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc into kvm-next
Second PPC KVM update for 4.21.

This has 5 commits that fix page dirty tracking when running nested HV KVM guests, from Suraj Jitindar Singh.
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 26
-rw-r--r--  arch/powerpc/kvm/book3s_hv_nested.c    | 74
2 files changed, 88 insertions(+), 12 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 870ef9d5eee6..fb88167a402a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -294,8 +294,8 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
return 0;
}
-static void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
- unsigned int pshift, unsigned int lpid)
+void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
+ unsigned int pshift, unsigned int lpid)
{
unsigned long psize = PAGE_SIZE;
int psi;
@@ -982,12 +982,18 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long gpa = gfn << PAGE_SHIFT;
unsigned int shift;
int ref = 0;
+ unsigned long old, *rmapp;
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_young(*ptep)) {
- kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
- gpa, shift);
+ old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_ACCESSED, 0,
+ gpa, shift);
/* XXX need to flush tlb here? */
+ /* Also clear bit in ptes in shadow pgtable for nested guests */
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
+ old & PTE_RPN_MASK,
+ 1UL << shift);
ref = 1;
}
return ref;
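The aging path above now captures the old pte value so that old & PTE_RPN_MASK can key the nested walk: only shadow ptes that still map the same host page get their referenced bit cleared, and the comparison mask is widened by the mapping size. A self-contained sketch of that comparison; the RPN field layout used here (bits 12..51) is an assumption for illustration, not the real arch-defined PTE_RPN_MASK:

/*
 * Sketch only: hpa key taken from the old pte's RPN field, widened by
 * the mapping size so any shadow pte inside the same large page
 * matches. PTE_RPN_MASK value below is assumed, not authoritative.
 */
#include <assert.h>
#include <stdint.h>

#define PTE_RPN_MASK 0x000ffffffffff000ULL      /* assumed layout */

int main(void)
{
        uint64_t old    = 0x40201000ULL | 0x86; /* hpa bits + attribute bits */
        uint64_t nbytes = 1ULL << 21;           /* shift == 21: a 2MB pte */
        uint64_t mask   = PTE_RPN_MASK & ~(nbytes - 1);
        uint64_t hpa    = (old & PTE_RPN_MASK) & mask;

        /* a shadow pte anywhere inside the same 2MB page compares equal */
        uint64_t shadow_pte = (0x40201000ULL + 0x3000) | 0x7;

        assert((shadow_pte & mask) == hpa);
        return 0;
}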
@@ -1017,15 +1023,23 @@ static int kvm_radix_test_clear_dirty(struct kvm *kvm,
pte_t *ptep;
unsigned int shift;
int ret = 0;
+ unsigned long old, *rmapp;
ptep = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
if (ptep && pte_present(*ptep) && pte_dirty(*ptep)) {
ret = 1;
if (shift)
ret = 1 << (shift - PAGE_SHIFT);
- kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
- gpa, shift);
+ spin_lock(&kvm->mmu_lock);
+ old = kvmppc_radix_update_pte(kvm, ptep, _PAGE_DIRTY, 0,
+ gpa, shift);
kvmppc_radix_tlbie_page(kvm, gpa, shift, kvm->arch.lpid);
+ /* Also clear bit in ptes in shadow pgtable for nested guests */
+ rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
+ kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_DIRTY, 0,
+ old & PTE_RPN_MASK,
+ 1UL << shift);
+ spin_unlock(&kvm->mmu_lock);
}
return ret;
}
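kvm_radix_test_clear_dirty() reports how many base pages the cleared pte covered: 1 for a small pte, 1 << (shift - PAGE_SHIFT) for a large one, so a caller can mark a whole run of dirty-bitmap bits per pte. Below is an illustrative, self-contained consumer loop; the names and the fake page table are invented for the sketch, while the in-tree consumer is kvmppc_hv_get_dirty_log_radix():

#include <assert.h>
#include <stdint.h>

#define NPAGES 1024

/* stand-in: npages covered if gfn starts a dirty mapping, else 0 */
static int fake_test_clear_dirty(uint64_t gfn)
{
        return gfn == 512 ? 16 : 0;     /* one dirty 64K pte at gfn 512 */
}

int main(void)
{
        uint64_t map[NPAGES / 64] = { 0 };

        for (uint64_t i = 0; i < NPAGES; ) {
                int n = fake_test_clear_dirty(i);

                if (!n) {
                        i++;
                        continue;
                }
                while (n--) {           /* one pte can dirty many bits */
                        map[i / 64] |= 1ULL << (i % 64);
                        i++;
                }
        }
        assert(map[8] == 0xffffULL);    /* gfns 512..527 -> word 8, bits 0..15 */
        return 0;
}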
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 9dce4b9c1d9c..735e0ac6f5b2 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -788,6 +788,57 @@ void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
*n_rmap = NULL;
}
+static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
+ unsigned long clr, unsigned long set,
+ unsigned long hpa, unsigned long mask)
+{
+ struct kvm_nested_guest *gp;
+ unsigned long gpa;
+ unsigned int shift, lpid;
+ pte_t *ptep;
+
+ gpa = n_rmap & RMAP_NESTED_GPA_MASK;
+ lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
+ gp = kvmhv_find_nested(kvm, lpid);
+ if (!gp)
+ return;
+
+ /* Find the pte */
+ ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
+ /*
+ * If the pte is present and the pfn is still the same, update the pte.
+ * If the pfn has changed then this is a stale rmap entry, the nested
+ * gpa actually points somewhere else now, and there is nothing to do.
+ * XXX A future optimisation would be to remove the rmap entry here.
+ */
+ if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
+ __radix_pte_update(ptep, clr, set);
+ kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
+ }
+}
+
+/*
+ * For a given list of rmap entries, update the rc bits in all ptes in shadow
+ * page tables for nested guests which are referenced by the rmap list.
+ */
+void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
+ unsigned long clr, unsigned long set,
+ unsigned long hpa, unsigned long nbytes)
+{
+ struct llist_node *entry = ((struct llist_head *) rmapp)->first;
+ struct rmap_nested *cursor;
+ unsigned long rmap, mask;
+
+ if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
+ return;
+
+ mask = PTE_RPN_MASK & ~(nbytes - 1);
+ hpa &= mask;
+
+ for_each_nest_rmap_safe(cursor, entry, &rmap)
+ kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
+}
+
static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
unsigned long hpa, unsigned long mask)
{
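Each rmap entry consumed by kvmhv_update_nest_rmap_rc() above is a plain u64 packing the nested guest's LPID and the nested gpa, chained through a lock-free llist. A minimal standalone illustration of that encoding follows; the shifts and masks here are assumed for the sketch (the code above uses the kernel's RMAP_NESTED_LPID_MASK / RMAP_NESTED_LPID_SHIFT / RMAP_NESTED_GPA_MASK), and encode_rmap() is invented for illustration:

#include <assert.h>
#include <stdint.h>

#define LPID_SHIFT 52
#define LPID_MASK  (0xfffULL << LPID_SHIFT)    /* assumed: top 12 bits */
#define GPA_MASK   0x000ffffffffff000ULL       /* assumed: page-aligned gpa */

static uint64_t encode_rmap(uint32_t lpid, uint64_t gpa)
{
        return ((uint64_t)lpid << LPID_SHIFT) | (gpa & GPA_MASK);
}

int main(void)
{
        uint64_t rmap = encode_rmap(5, 0x40001000ULL);

        assert(((rmap & LPID_MASK) >> LPID_SHIFT) == 5);   /* which L2 */
        assert((rmap & GPA_MASK) == 0x40001000ULL);        /* where in L2 */
        return 0;
}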
@@ -1150,7 +1201,7 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
struct kvm *kvm = vcpu->kvm;
bool writing = !!(dsisr & DSISR_ISSTORE);
u64 pgflags;
- bool ret;
+ long ret;
/* Are the rc bits set in the L1 partition scoped pte? */
pgflags = _PAGE_ACCESSED;
@@ -1163,16 +1214,22 @@ static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
ret = kvmppc_hv_handle_set_rc(kvm, kvm->arch.pgtable, writing,
gpte.raddr, kvm->arch.lpid);
- spin_unlock(&kvm->mmu_lock);
- if (!ret)
- return -EINVAL;
+ if (!ret) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
/* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
ret = kvmppc_hv_handle_set_rc(kvm, gp->shadow_pgtable, writing, n_gpa,
gp->shadow_lpid);
if (!ret)
- return -EINVAL;
- return 0;
+ ret = -EINVAL;
+ else
+ ret = 0;
+
+out_unlock:
+ spin_unlock(&kvm->mmu_lock);
+ return ret;
}
static inline int kvmppc_radix_level_to_shift(int level)
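The restructured error handling above keeps kvm->mmu_lock held across both rc updates and funnels every exit through a single unlock. A minimal sketch of the same single-exit pattern, using a placeholder pthread mutex and stand-in helpers rather than the kernel's spinlock and kvmppc_hv_handle_set_rc() APIs:

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the two kvmppc_hv_handle_set_rc() calls */
static int set_rc(int ok) { return ok; }

long set_rc_both_levels(int l1_ok, int shadow_ok)
{
        long ret = 0;

        pthread_mutex_lock(&lock);
        if (!set_rc(l1_ok)) {           /* first failure: one exit path */
                ret = -EINVAL;
                goto out_unlock;
        }
        if (!set_rc(shadow_ok))         /* second failure: same exit path */
                ret = -EINVAL;
out_unlock:
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        assert(set_rc_both_levels(1, 1) == 0);
        assert(set_rc_both_levels(0, 1) == -EINVAL);
        return 0;
}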
@@ -1322,6 +1379,8 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
return ret;
shift = kvmppc_radix_level_to_shift(level);
}
+ /* Align gfn to the start of the page */
+ gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
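The added alignment rounds the gfn down to the first base page of the (possibly large) mapping before indexing the memslot, so dirty and reference tracking attach to the right rmap slot. A worked example of that arithmetic, assuming a 4K base page and a 2MB mapping:

#include <assert.h>
#include <stdint.h>

#define PAGE_SHIFT 12

int main(void)
{
        unsigned int shift = 21;                /* a 2MB mapping */
        uint64_t gpa = (3ULL << 21) | 0x12345;  /* fault address inside it */

        /* the naive gfn points mid-page... */
        assert((gpa >> PAGE_SHIFT) == 0x612);

        /* ...the alignment yields the first gfn of the mapping */
        uint64_t gfn = (gpa & ~((1ULL << shift) - 1)) >> PAGE_SHIFT;
        assert(gfn == 0x600);
        return 0;
}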
@@ -1329,6 +1388,9 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
perm |= gpte.may_read ? 0UL : _PAGE_READ;
perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
+ /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
+ perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
+ perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
pte = __pte(pte_val(pte) & ~perm);
/* What size pte can we insert? */
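The two added perm lines strip _PAGE_ACCESSED and _PAGE_DIRTY from the shadow pte unless the L1 pte already has them set (and, for dirty, unless this fault is a store), so the nested guest's first access or store faults again and the rc update can be reflected into the L1 pte. A self-contained check of that masking, with placeholder bit values rather than the real Book3S definitions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define _PAGE_ACCESSED 0x1      /* placeholder values for the sketch */
#define _PAGE_DIRTY    0x2

static uint64_t strip_rc(uint64_t pte, uint64_t l1_rc, bool writing)
{
        uint64_t perm = 0;

        perm |= (l1_rc & _PAGE_ACCESSED) ? 0 : _PAGE_ACCESSED;
        perm |= ((l1_rc & _PAGE_DIRTY) && writing) ? 0 : _PAGE_DIRTY;
        return pte & ~perm;
}

int main(void)
{
        uint64_t pte = _PAGE_ACCESSED | _PAGE_DIRTY;

        /* L1 has neither bit set: the shadow pte gets neither */
        assert(strip_rc(pte, 0, true) == 0);
        /* L1 accessed, but a read fault: dirty is still stripped */
        assert(strip_rc(pte, _PAGE_ACCESSED, false) == _PAGE_ACCESSED);
        /* L1 accessed + dirty on a store: keep both */
        assert(strip_rc(pte, _PAGE_ACCESSED | _PAGE_DIRTY, true) == pte);
        return 0;
}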