Diffstat (limited to 'arch/powerpc/kvm/book3s_64_mmu_hv.c')

| Mode       | File                                | Lines changed |
|------------|-------------------------------------|---------------|
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c | 145           |

1 file changed, 69 insertions, 76 deletions
```diff
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c63e263312a4..f305395cf26e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -28,6 +28,7 @@
 #include <asm/pte-walk.h>
 
 #include "book3s.h"
+#include "book3s_hv.h"
 #include "trace_hv.h"
 
 //#define DEBUG_RESIZE_HPT	1
@@ -58,7 +59,7 @@ struct kvm_resize_hpt {
 	/* Possible values and their usage:
 	 *  <0  an error occurred during allocation,
 	 *  -EBUSY  allocation is in the progress,
-	 *  0  allocation made successfuly.
+	 *  0  allocation made successfully.
 	 */
 	int error;
 
@@ -120,13 +121,13 @@ void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
 	kvm->arch.hpt = *info;
 	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
 
-	pr_debug("KVM guest htab at %lx (order %ld), LPID %x\n",
+	pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n",
 		 info->virt, (long)info->order, kvm->arch.lpid);
 }
 
-long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
+int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order)
 {
-	long err = -EBUSY;
+	int err = -EBUSY;
 	struct kvm_hpt_info info;
 
 	mutex_lock(&kvm->arch.mmu_setup_lock);
@@ -182,7 +183,7 @@ void kvmppc_free_hpt(struct kvm_hpt_info *info)
 	vfree(info->rev);
 	info->rev = NULL;
 	if (info->cma)
-		kvm_free_hpt_cma(virt_to_page(info->virt),
+		kvm_free_hpt_cma(virt_to_page((void *)info->virt),
 				 1 << (info->order - PAGE_SHIFT));
 	else if (info->virt)
 		free_pages(info->virt, info->order - PAGE_SHIFT);
@@ -256,26 +257,34 @@ void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
 
 int kvmppc_mmu_hv_init(void)
 {
-	unsigned long host_lpid, rsvd_lpid;
+	unsigned long nr_lpids;
 
 	if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE))
 		return -EINVAL;
 
-	host_lpid = 0;
-	if (cpu_has_feature(CPU_FTR_HVMODE))
-		host_lpid = mfspr(SPRN_LPID);
+	if (cpu_has_feature(CPU_FTR_HVMODE)) {
+		if (WARN_ON(mfspr(SPRN_LPID) != 0))
+			return -EINVAL;
+		nr_lpids = 1UL << mmu_lpid_bits;
+	} else {
+		nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT;
+	}
 
-	/* POWER8 and above have 12-bit LPIDs (10-bit in POWER7) */
-	if (cpu_has_feature(CPU_FTR_ARCH_207S))
-		rsvd_lpid = LPID_RSVD;
-	else
-		rsvd_lpid = LPID_RSVD_POWER7;
+	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
+		/* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */
+		if (cpu_has_feature(CPU_FTR_ARCH_207S))
+			WARN_ON(nr_lpids != 1UL << 12);
+		else
+			WARN_ON(nr_lpids != 1UL << 10);
 
-	kvmppc_init_lpid(rsvd_lpid + 1);
+		/*
+		 * Reserve the last implemented LPID use in partition
+		 * switching for POWER7 and POWER8.
+		 */
+		nr_lpids -= 1;
+	}
 
-	kvmppc_claim_lpid(host_lpid);
-	/* rsvd_lpid is reserved for use in partition switching */
-	kvmppc_claim_lpid(rsvd_lpid);
+	kvmppc_init_lpid(nr_lpids);
 
 	return 0;
 }
@@ -339,7 +348,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 	unsigned long v, orig_v, gr;
 	__be64 *hptep;
 	long int index;
-	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
+	int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR);
 
 	if (kvm_is_radix(vcpu->kvm))
 		return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite);
@@ -377,7 +386,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
 
 	/* Get PP bits and key for permission check */
 	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
-	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
+	key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
 	key &= slb_v;
 
 	/* Calculate permissions */
@@ -407,20 +416,25 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
  * embodied here.) If the instruction isn't a load or store, then
  * this doesn't return anything useful.
  */
-static int instruction_is_store(unsigned int instr)
+static int instruction_is_store(ppc_inst_t instr)
 {
 	unsigned int mask;
+	unsigned int suffix;
 
 	mask = 0x10000000;
-	if ((instr & 0xfc000000) == 0x7c000000)
+	suffix = ppc_inst_val(instr);
+	if (ppc_inst_prefixed(instr))
+		suffix = ppc_inst_suffix(instr);
+	else if ((suffix & 0xfc000000) == 0x7c000000)
 		mask = 0x100;		/* major opcode 31 */
-	return (instr & mask) != 0;
+	return (suffix & mask) != 0;
 }
 
 int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 			   unsigned long gpa, gva_t ea, int is_store)
 {
-	u32 last_inst;
+	ppc_inst_t last_inst;
+	bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED);
 
 	/*
 	 * Fast path - check if the guest physical address corresponds to a
@@ -435,7 +449,7 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 				       NULL);
 		srcu_read_unlock(&vcpu->kvm->srcu, idx);
 		if (!ret) {
-			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
+			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4));
 			return RESUME_GUEST;
 		}
 	}
@@ -450,7 +464,16 @@ int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu,
 	/*
 	 * WARNING: We do not know for sure whether the instruction we just
 	 * read from memory is the same that caused the fault in the first
-	 * place. If the instruction we read is neither an load or a store,
+	 * place.
+	 *
+	 * If the fault is prefixed but the instruction is not or vice
+	 * versa, try again so that we don't advance pc the wrong amount.
+	 */
+	if (ppc_inst_prefixed(last_inst) != is_prefixed)
+		return RESUME_GUEST;
+
+	/*
+	 * If the instruction we read is neither an load or a store,
 	 * then it can't access memory, so we don't need to worry about
 	 * enforcing access permissions. So, assuming it is a load or
 	 * store, we just check that its direction (load or store) is
@@ -570,7 +593,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 		return -EFAULT;
 
 	/* used to check for invalidations in progress */
-	mmu_seq = kvm->mmu_notifier_seq;
+	mmu_seq = kvm->mmu_invalidate_seq;
 	smp_rmb();
 
 	ret = -EFAULT;
@@ -580,27 +603,10 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 		write_ok = writing;
 	hva = gfn_to_hva_memslot(memslot, gfn);
 
-	/*
-	 * Do a fast check first, since __gfn_to_pfn_memslot doesn't
-	 * do it with !atomic && !async, which is how we call it.
-	 * We always ask for write permission since the common case
-	 * is that the page is writable.
-	 */
-	if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
-		write_ok = true;
-	} else {
-		/* Call KVM generic code to do the slow-path check */
-		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, NULL,
-					   writing, &write_ok, NULL);
-		if (is_error_noslot_pfn(pfn))
-			return -EFAULT;
-		page = NULL;
-		if (pfn_valid(pfn)) {
-			page = pfn_to_page(pfn);
-			if (PageReserved(page))
-				page = NULL;
-		}
-	}
+	pfn = __kvm_faultin_pfn(memslot, gfn, writing ? FOLL_WRITE : 0,
+				&write_ok, &page);
+	if (is_error_noslot_pfn(pfn))
+		return -EFAULT;
 
 	/*
 	 * Read the PTE from the process' radix tree and use that
@@ -685,7 +691,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu,
 
 	/* Check if we might have been invalidated; let the guest retry if so */
 	ret = RESUME_GUEST;
-	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
+	if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
 		unlock_rmap(rmap);
 		goto out_unlock;
 	}
@@ -734,11 +740,11 @@ void kvmppc_rmap_reset(struct kvm *kvm)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	int srcu_idx;
+	int srcu_idx, bkt;
 
 	srcu_idx = srcu_read_lock(&kvm->srcu);
 	slots = kvm_memslots(kvm);
-	kvm_for_each_memslot(memslot, slots) {
+	kvm_for_each_memslot(memslot, bkt, slots) {
 		/* Mutual exclusion with kvm_unmap_hva_range etc. */
 		spin_lock(&kvm->mmu_lock);
 		/*
@@ -879,7 +885,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	struct revmap_entry *rev = kvm->arch.hpt.rev;
 	unsigned long head, i, j;
 	__be64 *hptep;
-	int ret = 0;
+	bool ret = false;
 	unsigned long *rmapp;
 
 	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
@@ -887,7 +893,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 	lock_rmap(rmapp);
 	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
 		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
-		ret = 1;
+		ret = true;
 	}
 	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
 		unlock_rmap(rmapp);
@@ -919,7 +925,7 @@ static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot,
 				rev[i].guest_rpte |= HPTE_R_R;
 				note_hpte_modification(kvm, &rev[i]);
 			}
-			ret = 1;
+			ret = true;
 		}
 		__unlock_hpte(hptep, be64_to_cpu(hptep[0]));
 	} while ((i = j) != head);
@@ -987,18 +993,6 @@ bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
 	return kvm_test_age_rmapp(kvm, range->slot, range->start);
 }
 
-bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range)
-{
-	WARN_ON(range->start + 1 != range->end);
-
-	if (kvm_is_radix(kvm))
-		kvm_unmap_radix(kvm, range->slot, range->start);
-	else
-		kvm_unmap_rmapp(kvm, range->slot, range->start);
-
-	return false;
-}
-
 static int vcpus_running(struct kvm *kvm)
 {
 	return atomic_read(&kvm->arch.vcpus_running) != 0;
@@ -1194,7 +1188,7 @@ static int resize_hpt_allocate(struct kvm_resize_hpt *resize)
 	if (rc < 0)
 		return rc;
 
-	resize_hpt_debug(resize, "resize_hpt_allocate(): HPT @ 0x%lx\n",
+	resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n", __func__,
 			 resize->hpt.virt);
 
 	return 0;
 }
@@ -1435,7 +1429,7 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	 */
 	mutex_unlock(&kvm->arch.mmu_setup_lock);
 
-	resize_hpt_debug(resize, "resize_hpt_prepare_work(): order = %d\n",
+	resize_hpt_debug(resize, "%s(): order = %d\n", __func__,
 			 resize->order);
 
 	err = resize_hpt_allocate(resize);
@@ -1460,8 +1454,8 @@ static void resize_hpt_prepare_work(struct work_struct *work)
 	mutex_unlock(&kvm->arch.mmu_setup_lock);
 }
 
-long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
-				     struct kvm_ppc_resize_hpt *rhpt)
+int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
+				    struct kvm_ppc_resize_hpt *rhpt)
 {
 	unsigned long flags = rhpt->flags;
 	unsigned long shift = rhpt->shift;
@@ -1526,13 +1520,13 @@ static void resize_hpt_boot_vcpu(void *opaque)
 	/* Nothing to do, just force a KVM exit */
 }
 
-long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
-				    struct kvm_ppc_resize_hpt *rhpt)
+int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
+				   struct kvm_ppc_resize_hpt *rhpt)
 {
 	unsigned long flags = rhpt->flags;
 	unsigned long shift = rhpt->shift;
 	struct kvm_resize_hpt *resize;
-	long ret;
+	int ret;
 
 	if (flags != 0 || kvm_is_radix(kvm))
 		return -EINVAL;
@@ -1879,8 +1873,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
 							 tmp);
 			if (ret != H_SUCCESS) {
-				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
-				       "r=%lx\n", ret, i, v, r);
+				pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r);
 				goto out;
 			}
 			if (!mmu_ready && is_vrma_hpte(v)) {
@@ -2112,7 +2105,7 @@ static const struct file_operations debugfs_htab_fops = {
 
 void kvmppc_mmu_debugfs_init(struct kvm *kvm)
 {
-	debugfs_create_file("htab", 0400, kvm->arch.debugfs_dir, kvm,
+	debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm,
			    &debugfs_htab_fops);
 }
```
