Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_radix.c')
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_radix.c | 83 |
1 file changed, 34 insertions(+), 49 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 572707858d65..b3e6e73d6a08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -15,6 +15,7 @@
 
 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
+#include "book3s_hv.h"
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/pgalloc.h>
@@ -39,6 +40,9 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 	unsigned long quadrant, ret = n;
 	bool is_load = !!to;
 
+	if (kvmhv_is_nestedv2())
+		return H_UNSUPPORTED;
+
 	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
 	if (kvmhv_on_pseries())
 		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
@@ -96,7 +100,7 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
 					  void *to, void *from, unsigned long n)
 {
 	int lpid = vcpu->kvm->arch.lpid;
-	int pid = vcpu->arch.pid;
+	int pid;
 
 	/* This would cause a data segment intr so don't allow the access */
 	if (eaddr & (0x3FFUL << 52))
@@ -109,6 +113,8 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* If accessing quadrant 3 then pid is expected to be 0 */
 	if (((eaddr >> 62) & 0x3) == 0x3)
 		pid = 0;
+	else
+		pid = kvmppc_get_pid(vcpu);
 
 	eaddr &= ~(0xFFFUL << 52);
 
@@ -270,7 +276,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Work out effective PID */
 	switch (eaddr >> 62) {
 	case 0:
-		pid = vcpu->arch.pid;
+		pid = kvmppc_get_pid(vcpu);
 		break;
 	case 3:
 		pid = 0;
@@ -294,9 +300,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	} else {
 		if (!(pte & _PAGE_PRIVILEGED)) {
 			/* Check AMR/IAMR to see if strict mode is in force */
-			if (vcpu->arch.amr & (1ul << 62))
+			if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
 				gpte->may_read = 0;
-			if (vcpu->arch.amr & (1ul << 63))
+			if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
 				gpte->may_write = 0;
 			if (vcpu->arch.iamr & (1ul << 62))
 				gpte->may_execute = 0;
@@ -307,7 +313,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 }
 
 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
-			     unsigned int pshift, unsigned int lpid)
+			     unsigned int pshift, u64 lpid)
 {
 	unsigned long psize = PAGE_SIZE;
 	int psi;
@@ -344,7 +350,7 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }
 
-static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid)
 {
 	long rc;
 
@@ -417,7 +423,7 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
 		      unsigned int shift,
 		      const struct kvm_memory_slot *memslot,
-		      unsigned int lpid)
+		      u64 lpid)
 {
 	unsigned long old;
 
@@ -468,7 +474,7 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
  * (or 4kB) mappings (of sub-pages of the same 2MB page).
  */
 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
-				  unsigned int lpid)
+				  u64 lpid)
 {
 	if (full) {
 		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
@@ -489,7 +495,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
 }
 
 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
-				  unsigned int lpid)
+				  u64 lpid)
 {
 	unsigned long im;
 	pmd_t *p = pmd;
@@ -497,7 +503,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
 		if (!pmd_present(*p))
 			continue;
-		if (pmd_is_leaf(*p)) {
+		if (pmd_leaf(*p)) {
 			if (full) {
 				pmd_clear(p);
 			} else {
@@ -518,7 +524,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 }
 
 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
-				  unsigned int lpid)
+				  u64 lpid)
 {
 	unsigned long iu;
 	pud_t *p = pud;
@@ -526,7 +532,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
 		if (!pud_present(*p))
 			continue;
-		if (pud_is_leaf(*p)) {
+		if (pud_leaf(*p)) {
 			pud_clear(p);
 		} else {
 			pmd_t *pmd;
@@ -539,7 +545,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
 	pud_free(kvm->mm, pud);
 }
 
-void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid)
 {
 	unsigned long ig;
 
@@ -566,7 +572,7 @@ void kvmppc_free_radix(struct kvm *kvm)
 }
 
 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
-					      unsigned long gpa, unsigned int lpid)
+					      unsigned long gpa, u64 lpid)
 {
 	pte_t *pte = pte_offset_kernel(pmd, 0);
 
@@ -582,7 +588,7 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
 }
 
 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
-					      unsigned long gpa, unsigned int lpid)
+					      unsigned long gpa, u64 lpid)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);
 
@@ -608,7 +614,7 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
 
 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		      unsigned long gpa, unsigned int level,
-		      unsigned long mmu_seq, unsigned int lpid,
+		      unsigned long mmu_seq, u64 lpid,
 		      unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 	pgd_t *pgd;
@@ -629,12 +635,12 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = pud_alloc_one(kvm->mm, gpa);
 
 	pmd = NULL;
-	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
+	if (pud && pud_present(*pud) && !pud_leaf(*pud))
 		pmd = pmd_offset(pud, gpa);
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();
 
-	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();
 
 	/* Check if we might have been invalidated; let the guest retry if so */
@@ -652,7 +658,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = NULL;
 	}
 	pud = pud_offset(p4d, gpa);
-	if (pud_is_leaf(*pud)) {
+	if (pud_leaf(*pud)) {
 		unsigned long hgpa = gpa & PUD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */
@@ -703,7 +709,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		unsigned long lgpa = gpa & PMD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */
@@ -785,7 +791,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 }
 
 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
-			     unsigned long gpa, unsigned int lpid)
+			     unsigned long gpa, u64 lpid)
 {
 	unsigned long pgflags;
 	unsigned int shift;
@@ -815,7 +821,7 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
 int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 				   unsigned long gpa,
 				   struct kvm_memory_slot *memslot,
-				   bool writing, bool kvm_ro,
+				   bool writing,
 				   pte_t *inserted_pte, unsigned int *levelp)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -823,40 +829,21 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 	unsigned long mmu_seq;
 	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
 	bool upgrade_write = false;
-	bool *upgrade_p = &upgrade_write;
 	pte_t pte, *ptep;
 	unsigned int shift, level;
 	int ret;
 	bool large_enable;
+	kvm_pfn_t pfn;
 
 	/* used to check for invalidations in progress */
 	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
-	/*
-	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
-	 * do it with !atomic && !async, which is how we call it.
-	 * We always ask for write permission since the common case
-	 * is that the page is writable.
-	 */
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
-		upgrade_write = true;
-	} else {
-		unsigned long pfn;
-
-		/* Call KVM generic code to do the slow-path check */
-		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
-					   writing, upgrade_p, NULL);
-		if (is_error_noslot_pfn(pfn))
-			return -EFAULT;
-		page = NULL;
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageReserved(page))
-				page = NULL;
-		}
-	}
+	pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0,
+				&upgrade_write, &page);
+	if (is_error_noslot_pfn(pfn))
+		return -EFAULT;
 
 	/*
 	 * Read the PTE from the process' radix tree and use that
@@ -944,7 +931,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	struct kvm_memory_slot *memslot;
 	long ret;
 	bool writing = !!(dsisr & DSISR_ISSTORE);
-	bool kvm_ro = false;
 
 	/* Check for unusual errors */
 	if (dsisr & DSISR_UNSUPP_MMU) {
@@ -997,7 +983,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 						       ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
-		kvm_ro = true;
 	}
 
 	/* Failed to set the reference/change bits */
@@ -1015,7 +1000,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 
 	/* Try to insert a pte */
 	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
-					     kvm_ro, NULL, NULL);
+					     NULL, NULL);
 
 	if (ret == 0 || ret == -EAGAIN)
 		ret = RESUME_GUEST;
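Note on the accessor conversions above: direct vcpu->arch.pid / vcpu->arch.amr reads become kvmppc_get_pid() / kvmppc_get_amr_hv() calls, alongside the new kvmhv_is_nestedv2() bail-out and the "book3s_hv.h" include. A hedged reading of the motivation: under the nestedv2 API, register state may live in a guest state buffer owned by the L0 hypervisor, so reads funnel through one helper that can refresh a cached copy first. A minimal illustrative sketch of that pattern follows; every name in it is invented for the example and is not the in-tree code.

/* Sketch only: lazy-refresh accessor pattern. Not kernel code. */
enum reg_id_sketch { REG_PID_SKETCH, REG_AMR_SKETCH, NR_REGS_SKETCH };

struct vcpu_state_sketch {
	unsigned long regs[NR_REGS_SKETCH];
	unsigned long valid;	/* bitmap: which cached values are current */
};

/* Stand-in for fetching a value from the L0-managed guest state buffer. */
static void gsb_reload_sketch(struct vcpu_state_sketch *v, enum reg_id_sketch id)
{
	v->valid |= 1UL << id;
}

/* Every consumer reads through here, so a nestedv2 host can refresh the
 * cached value on demand; a bare-metal host just returns the field. */
static unsigned long get_reg_sketch(struct vcpu_state_sketch *v,
				    enum reg_id_sketch id)
{
	if (!(v->valid & (1UL << id)))
		gsb_reload_sketch(v, id);
	return v->regs[id];
}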
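The large hunk in kvmppc_book3s_instantiate_page() drops the open-coded get_user_page_fast_only() fast path, the __gfn_to_pfn_memslot() slow path, and the pfn_valid()/PageReserved() struct-page lookup in favour of a single __kvm_faultin_pfn() call, which returns the pfn, the backing struct page, and whether write access was obtained. Condensed from the + lines above into a stand-alone sketch (faultin_sketch is an invented wrapper, not kernel code; error handling and PTE insertion elided):

static int faultin_sketch(struct kvm_memory_slot *memslot, gfn_t gfn,
			  bool writing)
{
	struct page *page = NULL;
	bool writable = false;	/* takes over from upgrade_write/upgrade_p */
	kvm_pfn_t pfn;

	pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0,
				&writable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	/* ... install pfn in the partition-scoped table, then release page ... */
	return 0;
}

This is also why the kvm_ro plumbing disappears from kvmppc_book3s_radix_page_fault(): the helper takes the requested access in its FOLL_* argument, so the separate read-only flag has no remaining user.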
