author		Paul Mackerras <paulus@ozlabs.org>	2017-09-11 15:29:45 +1000
committer	Paul Mackerras <paulus@ozlabs.org>	2017-11-01 15:36:06 +1100
commit		8dc6cca556e4126f77b71cf8e0c45ccc78d1d213 (patch)
tree		e65a18826f1c90e7548cffec5c97cae7a45e9567 /arch/powerpc/kvm/book3s_hv_rm_mmu.c
parent		3e8f150a3bc30214c15e5f8d27e4b2d904bd929e (diff)
KVM: PPC: Book3S HV: Don't rely on host's page size information
This removes the dependence of KVM on the mmu_psize_defs array (which stores information about hardware support for various page sizes) and the things derived from it, chiefly hpte_page_sizes[], hpte_page_size(), hpte_actual_page_size() and get_sllp_encoding(). We also no longer rely on the mmu_slb_size variable or the MMU_FTR_1T_SEGMENTS feature bit.

The reason for doing this is so we can support an HPT guest on a radix host. In a radix host, the mmu_psize_defs array contains information about the page sizes supported by the MMU in radix mode rather than the page sizes supported by the MMU in HPT mode. Similarly, mmu_slb_size and the MMU_FTR_1T_SEGMENTS bit are not set.

Instead we hard-code knowledge of the behaviour of the HPT MMU in the POWER7, POWER8 and POWER9 processors (which are the only processors supported by HV KVM) - specifically the encoding of the LP fields in the HPT and SLB entries, and the fact that they have 32 SLB entries and support 1TB segments.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
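For illustration, here is a minimal sketch of what such hard-coded decoding can look like. The names kvmppc_actual_pgsz() and kvmppc_hpte_base_page_shift() appear in the diff below; the intermediate kvmppc_hpte_page_shift() helper, the packed (actual << 8) | base return convention, and the specific LP nibble values handled are illustrative assumptions about the POWER7/8/9 scheme, not a verbatim copy of the patch.

/*
 * Illustrative sketch only: decode an HPTE's page-size encoding without
 * consulting mmu_psize_defs.  HPTE_V_LARGE is the real kernel macro for
 * the L bit; the LP encodings below are an assumed subset.
 */
static inline int kvmppc_hpte_page_shift(unsigned long v, unsigned long r)
{
	unsigned int lphi;

	if (!(v & HPTE_V_LARGE))
		return 12;			/* 4kB base and actual */
	lphi = (r >> 16) & 0xf;			/* high nibble of LP */
	switch ((r >> 12) & 0xf) {		/* low nibble of LP */
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
	case 1:
		return 16;			/* 64kB */
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB actual, 64kB base */
		break;
	}
	return 0;				/* unrecognized encoding */
}

static inline int kvmppc_hpte_base_page_shift(unsigned long v, unsigned long r)
{
	return kvmppc_hpte_page_shift(v, r) & 0xff;	/* low byte = base shift */
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_page_shift(v, r);

	if (shift > 0xff)		/* mixed encoding: high byte = actual */
		shift >>= 8;
	return shift ? 1ul << shift : 0;
}

Under this sketch, returning 0 for an unrecognized encoding is what lets kvmppc_do_h_enter() in the diff below reject a bad HPTE with H_PARAMETER.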
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rm_mmu.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_rm_mmu.c	11
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 4efe364f1188..cf98f17c1aa6 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -129,7 +129,7 @@ static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
 	unsigned long *rmap;
 	unsigned long gfn;
 
-	gfn = hpte_rpn(hpte_gr, hpte_page_size(hpte_v, hpte_gr));
+	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
 	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
 	if (!memslot)
 		return NULL;
@@ -169,7 +169,8 @@ static void remove_revmap_chain(struct kvm *kvm, long pte_index,
 	}
 	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
 	if (rcbits & HPTE_R_C)
-		kvmppc_update_rmap_change(rmap, hpte_page_size(hpte_v, hpte_r));
+		kvmppc_update_rmap_change(rmap,
+					  kvmppc_actual_pgsz(hpte_v, hpte_r));
 	unlock_rmap(rmap);
 }
 
@@ -193,7 +194,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	if (kvm_is_radix(kvm))
 		return H_FUNCTION;
 
-	psize = hpte_page_size(pteh, ptel);
+	psize = kvmppc_actual_pgsz(pteh, ptel);
 	if (!psize)
 		return H_PARAMETER;
 	writing = hpte_is_writable(ptel);
@@ -848,7 +849,7 @@ long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
 	r = be64_to_cpu(hpte[1]);
 	gr |= r & (HPTE_R_R | HPTE_R_C);
 	if (r & HPTE_R_C) {
-		unsigned long psize = hpte_page_size(v, r);
+		unsigned long psize = kvmppc_actual_pgsz(v, r);
 		hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
 		eieio();
 		rmap = revmap_for_hpte(kvm, v, gr);
@@ -1014,7 +1015,7 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 		 * Check the HPTE again, including base page size
 		 */
 		if ((v & valid) && (v & mask) == val &&
-		    hpte_base_page_size(v, r) == (1ul << pshift))
+		    kvmppc_hpte_base_page_shift(v, r) == pshift)
 			/* Return with the HPTE still locked */
 			return (hash << 3) + (i >> 1);
 
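A note on this last hunk, under the assumptions of the sketch above: it swaps a size comparison for a shift comparison, which selects the same HPTEs because a page-size shift uniquely determines the size.

/*
 * Equivalent checks when pshift >= 12 and the decode succeeds:
 *
 *   old: hpte_base_page_size(v, r) == (1ul << pshift)
 *   new: kvmppc_hpte_base_page_shift(v, r) == pshift
 *
 * Comparing shifts directly skips the 1ul << shift computation
 * inside the HPT lookup loop.
 */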