| author | Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> | 2014-06-16 00:17:07 +0530 |
|---|---|---|
| committer | Alexander Graf <agraf@suse.de> | 2014-06-25 14:07:06 +0200 |
| commit | 341acbb3aabbcfbf069d7de4ad35f51b58176faf | |
| tree | f6ea84649db60b6bc5a56bf2e062c8db29046bbd | |
| parent | 511c66818d87db2a8931e7f7f92c7904bdd84f72 | |
KVM: PPC: BOOK3S: HV: Use base page size when comparing against slb value
With guests supporting multiple page sizes per segment (MPSS),
hpte_page_size returns the actual page size used. Add a new function to
return the base page size and use that to compare against the page size
calculated from the SLB. Without this patch an hpte lookup can fail because
we compare the wrong page size in kvmppc_hv_find_lock_hpte.
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Alexander Graf <agraf@suse.de>
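
To illustrate why the lookup has to compare the base page size, here is a small standalone sketch (not kernel code): the struct, helper and shift values below are invented for illustration only, modelling an MPSS mapping whose segment base page size is 64K while the actual backing page is 16M, the case the commit message describes.

```c
/*
 * Simplified, userspace-only model of the check in kvmppc_hv_find_lock_hpte().
 * An HPTE created for an MPSS segment has a *base* page size (what the SLB
 * entry advertises) and an *actual* page size (what the hardware mapping
 * uses). The lookup derives pshift from the SLB, i.e. from the base page
 * size, so it must be compared against the HPTE's base page size.
 * All encodings here are made up; they are not the kernel's.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_hpte {
	unsigned int base_shift;	/* segment base page size, e.g. 16 for 64K */
	unsigned int actual_shift;	/* actual mapping size, e.g. 24 for 16M   */
};

/* Mirrors the idea of __hpte_page_size(..., is_base_size) in the patch. */
static unsigned long fake_hpte_page_size(const struct fake_hpte *h, bool base)
{
	return 1ul << (base ? h->base_shift : h->actual_shift);
}

int main(void)
{
	/* MPSS example: 64K base page size segment backed by a 16M page. */
	struct fake_hpte hpte = { .base_shift = 16, .actual_shift = 24 };

	/* The lookup's pshift comes from the SLB, i.e. the base page size. */
	unsigned int pshift = 16;

	printf("compare actual size: %s\n",
	       fake_hpte_page_size(&hpte, false) == (1ul << pshift)
	       ? "match" : "miss (lookup fails)");
	printf("compare base size:   %s\n",
	       fake_hpte_page_size(&hpte, true) == (1ul << pshift)
	       ? "match (lookup succeeds)" : "miss");
	return 0;
}
```

Before the patch, kvmppc_hv_find_lock_hpte made the first comparison (actual size against the SLB-derived pshift) and could therefore miss a valid entry; the patch switches it to the second comparison via the new hpte_base_page_size() helper.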
| -rw-r--r-- | arch/powerpc/include/asm/kvm_book3s_64.h | 19 |
|---|---|---|
| -rw-r--r-- | arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 |
| -rw-r--r-- | arch/powerpc/kvm/book3s_hv_rm_mmu.c | 7 |

3 files changed, 20 insertions, 8 deletions
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index fddb72b48ce9..d645428a65a4 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
 	return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+					     bool is_base_size)
 {
+
 	int size, a_psize;
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
 				continue;
 
 			a_psize = __hpte_actual_psize(lp, size);
-			if (a_psize != -1)
+			if (a_psize != -1) {
+				if (is_base_size)
+					return 1ul << mmu_psize_defs[size].shift;
 				return 1ul << mmu_psize_defs[a_psize].shift;
+			}
 		}
 
 	}
 	return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+	return __hpte_page_size(h, l, 1);
+}
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
 	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 80561074078d..68468d695f12 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
 				goto out;
 			}
 			if (!rma_setup && is_vrma_hpte(v)) {
-				unsigned long psize = hpte_page_size(v, r);
+				unsigned long psize = hpte_base_page_size(v, r);
 				unsigned long senc = slb_pgsize_encoding(psize);
 				unsigned long lpcr;
 
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 6e6224318c36..5a24d3c2b6b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
 			r = hpte[i+1];
 
 			/*
-			 * Check the HPTE again, including large page size
-			 * Since we don't currently allow any MPSS (mixed
-			 * page-size segment) page sizes, it is sufficient
-			 * to check against the actual page size.
+			 * Check the HPTE again, including base page size
 			 */
 			if ((v & valid) && (v & mask) == val &&
-			    hpte_page_size(v, r) == (1ul << pshift))
+			    hpte_base_page_size(v, r) == (1ul << pshift))
 				/* Return with the HPTE still locked */
 				return (hash << 3) + (i >> 1);
 
