Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_radix.c')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c | 186
1 file changed, 103 insertions(+), 83 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index 3cb0c9843d01..b3e6e73d6a08 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -15,12 +15,15 @@

 #include <asm/kvm_ppc.h>
 #include <asm/kvm_book3s.h>
+#include "book3s_hv.h"
 #include <asm/page.h>
 #include <asm/mmu.h>
 #include <asm/pgalloc.h>
 #include <asm/pte-walk.h>
 #include <asm/ultravisor.h>
 #include <asm/kvm_book3s_uvmem.h>
+#include <asm/plpar_wrappers.h>
+#include <asm/firmware.h>

 /*
  * Supported radix tree geometry.
@@ -33,14 +36,21 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 					      gva_t eaddr, void *to, void *from,
 					      unsigned long n)
 {
-	int uninitialized_var(old_pid), old_lpid;
+	int old_pid, old_lpid;
 	unsigned long quadrant, ret = n;
 	bool is_load = !!to;

+	if (kvmhv_is_nestedv2())
+		return H_UNSUPPORTED;
+
 	/* Can't access quadrants 1 or 2 in non-HV mode, call the HV to do it */
 	if (kvmhv_on_pseries())
 		return plpar_hcall_norets(H_COPY_TOFROM_GUEST, lpid, pid, eaddr,
-					  __pa(to), __pa(from), n);
+					  (to != NULL) ? __pa(to): 0,
+					  (from != NULL) ? __pa(from): 0, n);
+
+	if (eaddr & (0xFFFUL << 52))
+		return ret;

 	quadrant = 1;
 	if (!pid)
@@ -52,6 +62,8 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,

 	preempt_disable();

+	asm volatile("hwsync" ::: "memory");
+	isync();
 	/* switch the lpid first to avoid running host with unallocated pid */
 	old_lpid = mfspr(SPRN_LPID);
 	if (old_lpid != lpid)
@@ -63,11 +75,15 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,
 	}
 	isync();

+	pagefault_disable();
 	if (is_load)
-		ret = probe_user_read(to, (const void __user *)from, n);
+		ret = __copy_from_user_inatomic(to, (const void __user *)from, n);
 	else
-		ret = probe_user_write((void __user *)to, from, n);
+		ret = __copy_to_user_inatomic((void __user *)to, from, n);
+	pagefault_enable();

+	asm volatile("hwsync" ::: "memory");
+	isync();
 	/* switch the pid first to avoid running host with unallocated pid */
 	if (quadrant == 1 && pid != old_pid)
 		mtspr(SPRN_PID, old_pid);
@@ -79,13 +95,12 @@ unsigned long __kvmhv_copy_tofrom_guest_radix(int lpid, int pid,

 	return ret;
 }
-EXPORT_SYMBOL_GPL(__kvmhv_copy_tofrom_guest_radix);

 static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
 					  void *to, void *from, unsigned long n)
 {
 	int lpid = vcpu->kvm->arch.lpid;
-	int pid = vcpu->arch.pid;
+	int pid;

 	/* This would cause a data segment intr so don't allow the access */
 	if (eaddr & (0x3FFUL << 52))
@@ -98,6 +113,8 @@ static long kvmhv_copy_tofrom_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* If accessing quadrant 3 then pid is expected to be 0 */
 	if (((eaddr >> 62) & 0x3) == 0x3)
 		pid = 0;
+	else
+		pid = kvmppc_get_pid(vcpu);

 	eaddr &= ~(0xFFFUL << 52);

@@ -115,14 +132,12 @@ long kvmhv_copy_from_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr, void *to,

 	return ret;
 }
-EXPORT_SYMBOL_GPL(kvmhv_copy_from_guest_radix);

 long kvmhv_copy_to_guest_radix(struct kvm_vcpu *vcpu, gva_t eaddr,
 			       void *from, unsigned long n)
 {
 	return kvmhv_copy_tofrom_guest_radix(vcpu, eaddr, NULL, from, n);
 }
-EXPORT_SYMBOL_GPL(kvmhv_copy_to_guest_radix);

 int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
 			       struct kvmppc_pte *gpte, u64 root,
@@ -160,7 +175,10 @@ int kvmppc_mmu_walk_radix_tree(struct kvm_vcpu *vcpu, gva_t eaddr,
 			return -EINVAL;
 		/* Read the entry from guest memory */
 		addr = base + (index * sizeof(rpte));
+
+		kvm_vcpu_srcu_read_lock(vcpu);
 		ret = kvm_read_guest(kvm, addr, &rpte, sizeof(rpte));
+		kvm_vcpu_srcu_read_unlock(vcpu);
 		if (ret) {
 			if (pte_ret_p)
 				*pte_ret_p = addr;
@@ -236,7 +254,9 @@ int kvmppc_mmu_radix_translate_table(struct kvm_vcpu *vcpu, gva_t eaddr,

 	/* Read the table to find the root of the radix tree */
 	ptbl = (table & PRTB_MASK) + (table_index * sizeof(entry));
+	kvm_vcpu_srcu_read_lock(vcpu);
 	ret = kvm_read_guest(kvm, ptbl, &entry, sizeof(entry));
+	kvm_vcpu_srcu_read_unlock(vcpu);
 	if (ret)
 		return ret;

@@ -256,7 +276,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	/* Work out effective PID */
 	switch (eaddr >> 62) {
 	case 0:
-		pid = vcpu->arch.pid;
+		pid = kvmppc_get_pid(vcpu);
 		break;
 	case 3:
 		pid = 0;
@@ -280,9 +300,9 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	} else {
 		if (!(pte & _PAGE_PRIVILEGED)) {
 			/* Check AMR/IAMR to see if strict mode is in force */
-			if (vcpu->arch.amr & (1ul << 62))
+			if (kvmppc_get_amr_hv(vcpu) & (1ul << 62))
 				gpte->may_read = 0;
-			if (vcpu->arch.amr & (1ul << 63))
+			if (kvmppc_get_amr_hv(vcpu) & (1ul << 63))
 				gpte->may_write = 0;
 			if (vcpu->arch.iamr & (1ul << 62))
 				gpte->may_execute = 0;
@@ -293,7 +313,7 @@ int kvmppc_mmu_radix_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 }

 void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
-			     unsigned int pshift, unsigned int lpid)
+			     unsigned int pshift, u64 lpid)
 {
 	unsigned long psize = PAGE_SIZE;
 	int psi;
@@ -313,14 +333,24 @@ void kvmppc_radix_tlbie_page(struct kvm *kvm, unsigned long addr,
 	}

 	psi = shift_to_mmu_psize(pshift);
-	rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
-	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
-				lpid, rb);
+
+	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE)) {
+		rb = addr | (mmu_get_ap(psi) << PPC_BITLSHIFT(58));
+		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(0, 0, 1),
+					lpid, rb);
+	} else {
+		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+					    H_RPTI_TYPE_NESTED |
+					    H_RPTI_TYPE_TLB,
+					    psize_to_rpti_pgsize(psi),
+					    addr, addr + psize);
+	}
+
 	if (rc)
 		pr_err("KVM: TLB page invalidation hcall failed, rc=%ld\n", rc);
 }

-static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
+static void kvmppc_radix_flush_pwc(struct kvm *kvm, u64 lpid)
 {
 	long rc;

@@ -329,8 +359,14 @@ static void kvmppc_radix_flush_pwc(struct kvm *kvm, unsigned int lpid)
 		return;
 	}

-	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
-				lpid, TLBIEL_INVAL_SET_LPID);
+	if (!firmware_has_feature(FW_FEATURE_RPT_INVALIDATE))
+		rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(1, 0, 1),
+					lpid, TLBIEL_INVAL_SET_LPID);
+	else
+		rc = pseries_rpt_invalidate(lpid, H_RPTI_TARGET_CMMU,
+					    H_RPTI_TYPE_NESTED |
+					    H_RPTI_TYPE_PWC, H_RPTI_PAGE_ALL,
+					    0, -1UL);
 	if (rc)
 		pr_err("KVM: TLB PWC invalidation hcall failed, rc=%ld\n", rc);
 }
@@ -342,7 +378,7 @@ static unsigned long kvmppc_radix_update_pte(struct kvm *kvm, pte_t *ptep,
 	return __radix_pte_update(ptep, clr, set);
 }

-void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
+static void kvmppc_radix_set_pte_at(struct kvm *kvm, unsigned long addr,
 			     pte_t *ptep, pte_t pte)
 {
 	radix__set_pte_at(kvm->mm, addr, ptep, pte, 0);
@@ -387,7 +423,7 @@ static void kvmppc_pmd_free(pmd_t *pmdp)
 void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
 		      unsigned int shift,
 		      const struct kvm_memory_slot *memslot,
-		      unsigned int lpid)
+		      u64 lpid)
 {
 	unsigned long old;

@@ -438,7 +474,7 @@ void kvmppc_unmap_pte(struct kvm *kvm, pte_t *pte, unsigned long gpa,
  * (or 4kB) mappings (of sub-pages of the same 2MB page).
  */
 static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
-				  unsigned int lpid)
+				  u64 lpid)
 {
 	if (full) {
 		memset(pte, 0, sizeof(long) << RADIX_PTE_INDEX_SIZE);
@@ -459,7 +495,7 @@ static void kvmppc_unmap_free_pte(struct kvm *kvm, pte_t *pte, bool full,
 }

 static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
-				  unsigned int lpid)
+				  u64 lpid)
 {
 	unsigned long im;
 	pmd_t *p = pmd;
@@ -467,7 +503,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 	for (im = 0; im < PTRS_PER_PMD; ++im, ++p) {
 		if (!pmd_present(*p))
 			continue;
-		if (pmd_is_leaf(*p)) {
+		if (pmd_leaf(*p)) {
 			if (full) {
 				pmd_clear(p);
 			} else {
@@ -479,7 +515,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 		} else {
 			pte_t *pte;

-			pte = pte_offset_map(p, 0);
+			pte = pte_offset_kernel(p, 0);
 			kvmppc_unmap_free_pte(kvm, pte, full, lpid);
 			pmd_clear(p);
 		}
@@ -488,7 +524,7 @@ static void kvmppc_unmap_free_pmd(struct kvm *kvm, pmd_t *pmd, bool full,
 }

 static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
-				  unsigned int lpid)
+				  u64 lpid)
 {
 	unsigned long iu;
 	pud_t *p = pud;
@@ -496,7 +532,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
 		if (!pud_present(*p))
 			continue;
-		if (pud_is_leaf(*p)) {
+		if (pud_leaf(*p)) {
 			pud_clear(p);
 		} else {
 			pmd_t *pmd;
@@ -509,7 +545,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
 	pud_free(kvm->mm, pud);
 }

-void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid)
+void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, u64 lpid)
 {
 	unsigned long ig;

@@ -536,7 +572,7 @@ void kvmppc_free_radix(struct kvm *kvm)
 }

 static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
-					unsigned long gpa, unsigned int lpid)
+					unsigned long gpa, u64 lpid)
 {
 	pte_t *pte = pte_offset_kernel(pmd, 0);

@@ -552,7 +588,7 @@ static void kvmppc_unmap_free_pmd_entry_table(struct kvm *kvm, pmd_t *pmd,
 }

 static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,
-					unsigned long gpa, unsigned int lpid)
+					unsigned long gpa, u64 lpid)
 {
 	pmd_t *pmd = pmd_offset(pud, 0);

@@ -578,7 +614,7 @@ static void kvmppc_unmap_free_pud_entry_table(struct kvm *kvm, pud_t *pud,

 int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		      unsigned long gpa, unsigned int level,
-		      unsigned long mmu_seq, unsigned int lpid,
+		      unsigned long mmu_seq, u64 lpid,
 		      unsigned long *rmapp, struct rmap_nested **n_rmap)
 {
 	pgd_t *pgd;
@@ -599,18 +635,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = pud_alloc_one(kvm->mm, gpa);

 	pmd = NULL;
-	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
+	if (pud && pud_present(*pud) && !pud_leaf(*pud))
 		pmd = pmd_offset(pud, gpa);
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();

-	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_is_leaf(*pmd)))
+	if (level == 0 && !(pmd && pmd_present(*pmd) && !pmd_leaf(*pmd)))
 		new_ptep = kvmppc_pte_alloc();

 	/* Check if we might have been invalidated; let the guest retry if so */
 	spin_lock(&kvm->mmu_lock);
 	ret = -EAGAIN;
-	if (mmu_notifier_retry(kvm, mmu_seq))
+	if (mmu_invalidate_retry(kvm, mmu_seq))
 		goto out_unlock;

 	/* Now traverse again under the lock and change the tree */
@@ -622,7 +658,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = NULL;
 	}
 	pud = pud_offset(p4d, gpa);
-	if (pud_is_leaf(*pud)) {
+	if (pud_leaf(*pud)) {
 		unsigned long hgpa = gpa & PUD_MASK;

 		/* Check if we raced and someone else has set the same thing */
@@ -673,7 +709,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pmd = NULL;
 	}
 	pmd = pmd_offset(pud, gpa);
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		unsigned long lgpa = gpa & PMD_MASK;

 		/* Check if we raced and someone else has set the same thing */
@@ -755,7 +791,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 }

 bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
-			     unsigned long gpa, unsigned int lpid)
+			     unsigned long gpa, u64 lpid)
 {
 	unsigned long pgflags;
 	unsigned int shift;
@@ -785,7 +821,7 @@ bool kvmppc_hv_handle_set_rc(struct kvm *kvm, bool nested, bool writing,
 int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 				   unsigned long gpa,
 				   struct kvm_memory_slot *memslot,
-				   bool writing, bool kvm_ro,
+				   bool writing,
 				   pte_t *inserted_pte, unsigned int *levelp)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -793,40 +829,21 @@ int kvmppc_book3s_instantiate_page(struct kvm_vcpu *vcpu,
 	unsigned long mmu_seq;
 	unsigned long hva, gfn = gpa >> PAGE_SHIFT;
 	bool upgrade_write = false;
-	bool *upgrade_p = &upgrade_write;
 	pte_t pte, *ptep;
 	unsigned int shift, level;
 	int ret;
 	bool large_enable;
+	kvm_pfn_t pfn;

 	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();

-	/*
-	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
-	 * do it with !atomic && !async, which is how we call it.
-	 * We always ask for write permission since the common case
-	 * is that the page is writable.
-	 */
 	hva = gfn_to_hva_memslot(memslot, gfn);
-	if (!kvm_ro && get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
-		upgrade_write = true;
-	} else {
-		unsigned long pfn;
-
-		/* Call KVM generic code to do the slow-path check */
-		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-					   writing, upgrade_p);
-		if (is_error_noslot_pfn(pfn))
-			return -EFAULT;
-		page = NULL;
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageReserved(page))
-				page = NULL;
-		}
-	}
+	pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0,
+				&upgrade_write, &page);
+	if (is_error_noslot_pfn(pfn))
+		return -EFAULT;

 	/*
 	 * Read the PTE from the process' radix tree and use that
@@ -914,7 +931,6 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	struct kvm_memory_slot *memslot;
 	long ret;
 	bool writing = !!(dsisr & DSISR_ISSTORE);
-	bool kvm_ro = false;

 	/* Check for unusual errors */
 	if (dsisr & DSISR_UNSUPP_MMU) {
@@ -924,7 +940,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (dsisr & DSISR_BADACCESS) {
 		/* Reflect to the guest as DSI */
 		pr_err("KVM: Got radix HV page fault with DSISR=%lx\n", dsisr);
-		kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+		kvmppc_core_queue_data_storage(vcpu,
+				kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+				ea, dsisr);
 		return RESUME_GUEST;
 	}

@@ -949,7 +967,9 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 			 * Bad address in guest page table tree, or other
 			 * unusual error - reflect it to the guest as DSI.
 			 */
-			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, dsisr);
 			return RESUME_GUEST;
 		}
 		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
@@ -958,11 +978,11 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 	if (memslot->flags & KVM_MEM_READONLY) {
 		if (writing) {
 			/* give the guest a DSI */
-			kvmppc_core_queue_data_storage(vcpu, ea, DSISR_ISSTORE |
-						       DSISR_PROTFAULT);
+			kvmppc_core_queue_data_storage(vcpu,
+					kvmppc_get_msr(vcpu) & SRR1_PREFIXED,
+					ea, DSISR_ISSTORE | DSISR_PROTFAULT);
 			return RESUME_GUEST;
 		}
-		kvm_ro = true;
 	}

 	/* Failed to set the reference/change bits */
@@ -980,7 +1000,7 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,

 	/* Try to insert a pte */
 	ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot, writing,
-					     kvm_ro, NULL, NULL);
+					     NULL, NULL);

 	if (ret == 0 || ret == -EAGAIN)
 		ret = RESUME_GUEST;
@@ -988,8 +1008,8 @@ int kvmppc_book3s_radix_page_fault(struct kvm_vcpu *vcpu,
 }

 /* Called with kvm->mmu_lock held */
-int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		    unsigned long gfn)
+void kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		     unsigned long gfn)
 {
 	pte_t *ptep;
 	unsigned long gpa = gfn << PAGE_SHIFT;
@@ -997,24 +1017,23 @@ int kvm_unmap_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,

 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE) {
 		uv_page_inval(kvm->arch.lpid, gpa, PAGE_SHIFT);
-		return 0;
+		return;
 	}

 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, memslot,
 				 kvm->arch.lpid);
-	return 0;
 }

 /* Called with kvm->mmu_lock held */
-int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		  unsigned long gfn)
+bool kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+		   unsigned long gfn)
 {
 	pte_t *ptep;
 	unsigned long gpa = gfn << PAGE_SHIFT;
 	unsigned int shift;
-	int ref = 0;
+	bool ref = false;
 	unsigned long old, *rmapp;

 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
@@ -1030,26 +1049,27 @@ int kvm_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
 		kvmhv_update_nest_rmap_rc_list(kvm, rmapp, _PAGE_ACCESSED, 0,
					       old & PTE_RPN_MASK,
					       1UL << shift);
-		ref = 1;
+		ref = true;
 	}
 	return ref;
 }

 /* Called with kvm->mmu_lock held */
-int kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
-		       unsigned long gfn)
+bool kvm_test_age_radix(struct kvm *kvm, struct kvm_memory_slot *memslot,
+			unsigned long gfn)
+
 {
 	pte_t *ptep;
 	unsigned long gpa = gfn << PAGE_SHIFT;
 	unsigned int shift;
-	int ref = 0;
+	bool ref = false;

 	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
 		return ref;

 	ptep = find_kvm_secondary_pte(kvm, gpa, &shift);
 	if (ptep && pte_present(*ptep) && pte_young(*ptep))
-		ref = 1;
+		ref = true;
 	return ref;
 }

@@ -1161,7 +1181,7 @@ void kvmppc_radix_flush_memslot(struct kvm *kvm,
 	 * Increase the mmu notifier sequence number to prevent any page
 	 * fault that read the memslot earlier from writing a PTE.
 	 */
-	kvm->mmu_notifier_seq++;
+	kvm->mmu_invalidate_seq++;
 	spin_unlock(&kvm->mmu_lock);
 }

@@ -1426,7 +1446,7 @@ static const struct file_operations debugfs_radix_fops = {

 void kvmhv_radix_debugfs_init(struct kvm *kvm)
 {
-	debugfs_create_file("radix", 0400, kvm->arch.debugfs_dir, kvm,
+	debugfs_create_file("radix", 0400, kvm->debugfs_dentry, kvm,
 			    &debugfs_radix_fops);
 }
