Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rm_mmu.c  265
1 file changed, 206 insertions(+), 59 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 3b3791ed74a6..17cb75a127b0 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -1,7 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, version 2, as
- * published by the Free Software Foundation.
*
* Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*/
@@ -13,6 +11,7 @@
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>
+#include <linux/sizes.h>
#include <asm/trace.h>
#include <asm/kvm_ppc.h>
@@ -24,20 +23,9 @@
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
-static void *real_vmalloc_addr(void *x)
+static void *real_vmalloc_addr(void *addr)
{
- unsigned long addr = (unsigned long) x;
- pte_t *p;
- /*
- * assume we don't have huge pages in vmalloc space...
- * So don't worry about THP collapse/split. Called
- * Only in realmode with MSR_EE = 0, hence won't need irq_save/restore.
- */
- p = find_init_mm_pte(addr, NULL);
- if (!p || !pte_present(*p))
- return NULL;
- addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
- return __va(addr);
+ return __va(ppc_find_vmap_phys((unsigned long)addr));
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
@@ -58,17 +46,15 @@ static int global_invalidates(struct kvm *kvm)
else
global = 1;
+ /* LPID has been switched to host if in virt mode so can't do local */
+ if (!global && (mfmsr() & (MSR_IR|MSR_DR)))
+ global = 1;
+
if (!global) {
/* any other core might now have stale TLB entries... */
smp_wmb();
cpumask_setall(&kvm->arch.need_tlb_flush);
cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
- /*
- * On POWER9, threads are independent but the TLB is shared,
- * so use the bit for the first thread to represent the core.
- */
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- cpu = cpu_first_thread_sibling(cpu);
cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
}
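The new test uses MSR[IR]/MSR[DR] to detect that the hcall is being handled in virtual mode, where the LPID register has already been switched back to the host and a local tlbiel would hit the wrong partition context. A minimal sketch of that test (hypothetical helper name, not part of the patch):

    /* Relocation on (IR or DR set) means we are no longer on the
     * real-mode hcall path, so only a global tlbie is safe here. */
    static inline bool hcall_in_virt_mode(void)
    {
            return (mfmsr() & (MSR_IR | MSR_DR)) != 0;
    }
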
@@ -100,7 +86,7 @@ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
} else {
rev->forw = rev->back = pte_index;
*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
- pte_index | KVMPPC_RMAP_PRESENT;
+ pte_index | KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_HPT;
}
unlock_rmap(rmap);
}
@@ -211,10 +197,19 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pte_t *ptep;
unsigned int writing;
unsigned long mmu_seq;
- unsigned long rcbits, irq_flags = 0;
+ unsigned long rcbits;
if (kvm_is_radix(kvm))
return H_FUNCTION;
+ /*
+ * The HPTE gets used by compute_tlbie_rb() to set TLBIE bits, so
+ * these functions should work together -- must ensure a guest can not
+ * cause problems with the TLBIE that KVM executes.
+ */
+ if ((pteh >> HPTE_V_SSIZE_SHIFT) & 0x2) {
+ /* B=0b1x is a reserved value, disallow it. */
+ return H_PARAMETER;
+ }
psize = kvmppc_actual_pgsz(pteh, ptel);
if (!psize)
return H_PARAMETER;
@@ -224,7 +219,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
g_ptel = ptel;
/* used later to detect if we might have been invalidated */
- mmu_seq = kvm->mmu_notifier_seq;
+ mmu_seq = kvm->mmu_invalidate_seq;
smp_rmb();
/* Find the memslot (if any) for this address */
@@ -249,17 +244,9 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
- /*
- * If we had a page table table change after lookup, we would
- * retry via mmu_notifier_retry.
- */
- if (!realmode)
- local_irq_save(irq_flags);
- /*
- * If called in real mode we have MSR_EE = 0. Otherwise
- * we disable irq above.
- */
- ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
+
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift);
if (ptep) {
pte_t pte;
unsigned int host_pte_size;
@@ -273,13 +260,12 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
* to <= host page size, if host is using hugepage
*/
if (host_pte_size < psize) {
- if (!realmode)
- local_irq_restore(flags);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
return H_PARAMETER;
}
pte = kvmppc_read_update_linux_pte(ptep, writing);
if (pte_present(pte) && !pte_protnone(pte)) {
- if (writing && !__pte_write(pte))
+ if (writing && !pte_write(pte))
/* make the actual HPTE be read-only */
ptel = hpte_make_readonly(ptel);
is_ci = pte_ci(pte);
@@ -288,8 +274,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
pa |= gpa & ~PAGE_MASK;
}
}
- if (!realmode)
- local_irq_restore(irq_flags);
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
ptel |= pa;
@@ -381,7 +366,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
rmap = real_vmalloc_addr(rmap);
lock_rmap(rmap);
/* Check for pending invalidations under the rmap chain lock */
- if (mmu_notifier_retry(kvm, mmu_seq)) {
+ if (mmu_invalidate_retry(kvm, mmu_seq)) {
/* inval in progress, write a non-present HPTE */
pteh |= HPTE_V_ABSENT;
pteh &= ~HPTE_V_VALID;
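Together with the mmu_invalidate_seq snapshot taken earlier in kvmppc_do_h_enter(), this hunk is the usual KVM invalidation-race pattern. Condensed into a fragment for reference (using only names that appear in this file):

    mmu_seq = kvm->mmu_invalidate_seq;  /* snapshot first... */
    smp_rmb();                          /* ...before translating gfn -> hva -> pte */

    /* ... host pte lookup, build pteh/ptel ... */

    lock_rmap(rmap);
    if (mmu_invalidate_retry(kvm, mmu_seq)) {
            /* an invalidation raced with us: enter the HPTE as absent */
            pteh |= HPTE_V_ABSENT;
            pteh &= ~HPTE_V_VALID;
    }
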
@@ -420,6 +405,7 @@ long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
vcpu->arch.pgdir, true,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_enter);
#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
@@ -434,6 +420,37 @@ static inline int is_mmio_hpte(unsigned long v, unsigned long r)
(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}
+static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid)
+{
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
+ /* Radix flush for a hash guest */
+
+ unsigned long rb, rs, prs, r, ric;
+
+ rb = PPC_BIT(52); /* IS = 2 */
+ rs = 0; /* lpid = 0 */
+ prs = 0; /* partition scoped */
+ r = 1; /* radix format */
+ ric = 0; /* RIC_FLUSH_TLB */
+
+ /*
+ * Need the extra ptesync to make sure we don't
+ * re-order the tlbie
+ */
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
+ : : "r"(rb), "i"(r), "i"(prs),
+ "i"(ric), "r"(rs) : "memory");
+ }
+
+ if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
+ asm volatile("ptesync": : :"memory");
+ asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
+ "r" (rb_value), "r" (lpid));
+ }
+}
+
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
long npages, int global, bool need_sync)
{
@@ -452,16 +469,7 @@ static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
"r" (rbvalues[i]), "r" (kvm->arch.lpid));
}
- if (cpu_has_feature(CPU_FTR_P9_TLBIE_BUG)) {
- /*
- * Need the extra ptesync to make sure we don't
- * re-order the tlbie
- */
- asm volatile("ptesync": : :"memory");
- asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
- "r" (rbvalues[0]), "r" (kvm->arch.lpid));
- }
-
+ fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid);
asm volatile("eieio; tlbsync; ptesync" : : : "memory");
} else {
if (need_sync)
@@ -542,6 +550,7 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
&vcpu->arch.regs.gpr[4]);
}
+EXPORT_SYMBOL_GPL(kvmppc_h_remove);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
@@ -660,10 +669,10 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
- unsigned long pte_index, unsigned long avpn,
- unsigned long va)
+ unsigned long pte_index, unsigned long avpn)
{
struct kvm *kvm = vcpu->kvm;
__be64 *hpte;
@@ -731,6 +740,7 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_protect);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -766,11 +776,12 @@ long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
r &= ~HPTE_GR_RESERVED;
}
- vcpu->arch.regs.gpr[4 + i * 2] = v;
- vcpu->arch.regs.gpr[5 + i * 2] = r;
+ kvmppc_set_gpr(vcpu, 4 + i * 2, v);
+ kvmppc_set_gpr(vcpu, 5 + i * 2, r);
}
return H_SUCCESS;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_read);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -813,12 +824,13 @@ long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
}
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
unsigned long pte_index)
@@ -860,12 +872,146 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
kvmppc_set_dirty_from_hpte(kvm, v, gr);
}
}
- vcpu->arch.regs.gpr[4] = gr;
+ kvmppc_set_gpr(vcpu, 4, gr);
ret = H_SUCCESS;
out:
unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
return ret;
}
+EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod);
+
+static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq,
+ unsigned long gpa, int writing, unsigned long *hpa,
+ struct kvm_memory_slot **memslot_p)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct kvm_memory_slot *memslot;
+ unsigned long gfn, hva, pa, psize = PAGE_SHIFT;
+ unsigned int shift;
+ pte_t *ptep, pte;
+
+ /* Find the memslot for this address */
+ gfn = gpa >> PAGE_SHIFT;
+ memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
+ if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
+ return H_PARAMETER;
+
+ /* Translate to host virtual address */
+ hva = __gfn_to_hva_memslot(memslot, gfn);
+
+ /* Try to find the host pte for that virtual address */
+ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift);
+ if (!ptep)
+ return H_TOO_HARD;
+ pte = kvmppc_read_update_linux_pte(ptep, writing);
+ if (!pte_present(pte))
+ return H_TOO_HARD;
+
+ /* Convert to a physical address */
+ if (shift)
+ psize = 1UL << shift;
+ pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (psize - 1);
+ pa |= gpa & ~PAGE_MASK;
+
+ if (hpa)
+ *hpa = pa;
+ if (memslot_p)
+ *memslot_p = memslot;
+
+ return H_SUCCESS;
+}
+
+static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu,
+ unsigned long dest)
+{
+ struct kvm_memory_slot *memslot;
+ struct kvm *kvm = vcpu->kvm;
+ unsigned long pa, mmu_seq;
+ long ret = H_SUCCESS;
+ int i;
+
+ /* Used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_invalidate_seq;
+ smp_rmb();
+
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot);
+ if (ret != H_SUCCESS)
+ goto out_unlock;
+
+ /* Zero the page */
+ for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES)
+ dcbz((void *)pa);
+ kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ return ret;
+}
+
+static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu,
+ unsigned long dest, unsigned long src)
+{
+ unsigned long dest_pa, src_pa, mmu_seq;
+ struct kvm_memory_slot *dest_memslot;
+ struct kvm *kvm = vcpu->kvm;
+ long ret = H_SUCCESS;
+
+ /* Used later to detect if we might have been invalidated */
+ mmu_seq = kvm->mmu_invalidate_seq;
+ smp_rmb();
+
+ arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock);
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot);
+ if (ret != H_SUCCESS)
+ goto out_unlock;
+
+ ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL);
+ if (ret != H_SUCCESS)
+ goto out_unlock;
+
+ /* Copy the page */
+ memcpy((void *)dest_pa, (void *)src_pa, SZ_4K);
+
+ kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE);
+
+out_unlock:
+ arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock);
+ return ret;
+}
+
+long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
+ unsigned long dest, unsigned long src)
+{
+ struct kvm *kvm = vcpu->kvm;
+ u64 pg_mask = SZ_4K - 1; /* 4K page size */
+ long ret = H_SUCCESS;
+
+ /* Don't handle radix mode here, go up to the virtual mode handler */
+ if (kvm_is_radix(kvm))
+ return H_TOO_HARD;
+
+ /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
+ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
+ H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
+ return H_PARAMETER;
+
+ /* dest (and src if copy_page flag set) must be page aligned */
+ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
+ return H_PARAMETER;
+
+ /* zero and/or copy the page as determined by the flags */
+ if (flags & H_COPY_PAGE)
+ ret = kvmppc_do_h_page_init_copy(vcpu, dest, src);
+ else if (flags & H_ZERO_PAGE)
+ ret = kvmppc_do_h_page_init_zero(vcpu, dest);
+
+ /* We can ignore the other flags */
+
+ return ret;
+}
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
unsigned long pte_index)
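For context on the H_PAGE_INIT handlers added above: a guest requests the zero or copy operation through the normal hcall interface. An illustrative guest-side sketch (assuming the pseries plpar_hcall_norets() wrapper and the flag definitions from asm/hvcall.h; not part of this patch):

    #include <asm/hvcall.h>

    /* Zero one 4K page at real address dest_ra; dest_ra must be 4K
     * aligned or the handler above returns H_PARAMETER. */
    static long guest_zero_page(unsigned long dest_ra)
    {
            return plpar_hcall_norets(H_PAGE_INIT, H_ZERO_PAGE, dest_ra, 0);
    }

    /* Copy a 4K page from src_ra to dest_ra; both must be 4K aligned. */
    static long guest_copy_page(unsigned long dest_ra, unsigned long src_ra)
    {
            return plpar_hcall_norets(H_PAGE_INIT, H_COPY_PAGE, dest_ra, src_ra);
    }
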
@@ -1096,7 +1242,7 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */
if (!data) {
if (gr & (HPTE_R_N | HPTE_R_G))
- return status | SRR1_ISI_N_OR_G;
+ return status | SRR1_ISI_N_G_OR_CIP;
if (!hpte_read_permission(pp, slb_v & key))
return status | SRR1_ISI_PROT;
} else if (status & DSISR_ISSTORE) {
@@ -1151,3 +1297,4 @@ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
return -1; /* send fault up to host kernel mode */
}
+EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);