author      Paul Mackerras <paulus@ozlabs.org>   2019-04-29 19:00:40 +1000
committer   Paul Mackerras <paulus@ozlabs.org>   2019-04-30 19:32:01 +1000
commit      2940ba0c48bf18e15e85cbb0f26c0e88e1211587
tree        314b4cb4da889bfeb6c653a534bf2afe63689e1a /arch/powerpc
parent      7ae9bda7edf3dac56c9abaeec264ac08d399a1ff
KVM: PPC: Book3S HV: Move HPT guest TLB flushing to C code
This replaces assembler code in book3s_hv_rmhandlers.S that checks the
kvm->arch.need_tlb_flush cpumask and optionally does a TLB flush with C code
in book3s_hv_builtin.c.

Note that unlike the radix version, the hash version doesn't do an explicit
ERAT invalidation because we will invalidate and load up the SLB before
entering the guest, and that will invalidate the ERAT.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--   arch/powerpc/include/asm/kvm_ppc.h        2
-rw-r--r--   arch/powerpc/kvm/book3s_hv_builtin.c     29
-rw-r--r--   arch/powerpc/kvm/book3s_hv_rmhandlers.S  38
3 files changed, 35 insertions(+), 34 deletions(-)
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 05128e917a83..0975f5f2fd70 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -476,6 +476,8 @@ extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
+extern void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm);
+
#else
static inline void __init kvm_cma_reserve(void)
{}
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index b0cf22477e87..489abe5d9797 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -805,3 +805,32 @@ void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu)
		vcpu->arch.doorbell_request = 0;
	}
}
+
+void kvmppc_hpt_check_need_tlb_flush(struct kvm *kvm)
+{
+	int pcpu = raw_smp_processor_id();
+	unsigned long rb, set;
+
+	/*
+	 * On POWER9, individual threads can come in here, but the
+	 * TLB is shared between the 4 threads in a core, hence
+	 * invalidating on one thread invalidates for all.
+	 * Thus we make all 4 threads use the same bit.
+	 */
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		pcpu = cpu_first_thread_sibling(pcpu);
+
+	if (cpumask_test_cpu(pcpu, &kvm->arch.need_tlb_flush)) {
+		rb = PPC_BIT(52);	/* IS = 2 */
+		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
+			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
+				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
+				       "r" (0) : "memory");
+			rb += PPC_BIT(51);	/* increment set number */
+		}
+		asm volatile("ptesync": : :"memory");
+
+		/* Clear the bit after the TLB flush */
+		cpumask_clear_cpu(pcpu, &kvm->arch.need_tlb_flush);
+	}
+}
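
For reference, the PPC_BIT() values used above correspond to the magic constants in the assembler sequence removed from book3s_hv_rmhandlers.S below. A minimal standalone sketch, not kernel code, assuming PPC_BIT(x) expands to 1UL << (63 - x) as in arch/powerpc/include/asm/bitops.h:

	/*
	 * Standalone sketch, not kernel code: check that the PPC_BIT()
	 * arithmetic in kvmppc_hpt_check_need_tlb_flush() reproduces the
	 * constants hard-coded in the removed assembler.
	 *
	 * Assumption: PPC_BIT(x) is 1UL << (63 - x) (IBM bit numbering over
	 * a 64-bit doubleword), as in arch/powerpc/include/asm/bitops.h.
	 */
	#include <assert.h>
	#include <stdio.h>

	#define PPC_BIT(bit)	(1UL << (63 - (bit)))

	int main(void)
	{
		unsigned long rb = PPC_BIT(52);		/* IS = 2 */

		/* Matches "li r7,0x800" (IS field = 0b10) in the removed code. */
		assert(rb == 0x800);

		/* Matches "addi r7,r7,0x1000" used to step to the next TLB set. */
		assert(PPC_BIT(51) == 0x1000);

		printf("IS=2 seed: 0x%lx, per-set increment: 0x%lx\n",
		       rb, PPC_BIT(51));
		return 0;
	}

Compiled and run as an ordinary C program, both assertions hold, so the symbolic C version flushes the same sets with the same RB encoding as the deleted assembler.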
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 31f914742fdd..9d2fea43b0c5 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -623,40 +623,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
isync
/* See if we need to flush the TLB. Hash has to be done in RM */
-	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
-BEGIN_FTR_SECTION
-	/*
-	 * On POWER9, individual threads can come in here, but the
-	 * TLB is shared between the 4 threads in a core, hence
-	 * invalidating on one thread invalidates for all.
-	 * Thus we make all 4 threads use the same bit here.
-	 */
-	clrrdi	r6,r6,2
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
-	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
-	srdi	r6,r6,6			/* doubleword number */
-	sldi	r6,r6,3			/* address offset */
-	add	r6,r6,r9
-	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
-	li	r8,1
-	sld	r8,r8,r7
-	ld	r7,0(r6)
-	and.	r7,r7,r8
-	beq	22f
-	/* Flush the TLB of any entries for this LPID */
-	lwz	r0,KVM_TLB_SETS(r9)
-	mtctr	r0
-	li	r7,0x800		/* IS field = 0b10 */
-	ptesync
-	li	r0,0			/* RS for P9 version of tlbiel */
-28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
-	addi	r7,r7,0x1000
-	bdnz	28b
-	ptesync
-23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
-	andc	r7,r7,r8
-	stdcx.	r7,0,r6
-	bne	23b
+	mr	r3, r9			/* kvm pointer */
+	bl	kvmppc_hpt_check_need_tlb_flush
+	nop
+	ld	r5, HSTATE_KVM_VCORE(r13)
/* Add timebase offset onto timebase */
22: ld r8,VCORE_TB_OFFSET(r5)
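
Similarly, the "clrrdi r6,r6,2" in the deleted POWER9 path and the cpu_first_thread_sibling() call in the new C helper round the CPU number to the first thread of its core in the same way. A standalone sketch, assuming 4 threads per core (as the patch comment states for POWER9) and cpu_first_thread_sibling(cpu) being cpu & ~(threads_per_core - 1) as in arch/powerpc/include/asm/cputhreads.h:

	/*
	 * Standalone sketch, not kernel code: show that the deleted POWER9
	 * "clrrdi r6,r6,2" and the new cpu_first_thread_sibling() call pick
	 * the same per-core bit in kvm->arch.need_tlb_flush.
	 *
	 * Assumptions: 4 hardware threads per core, and
	 * cpu_first_thread_sibling(cpu) == cpu & ~(threads_per_core - 1),
	 * as in arch/powerpc/include/asm/cputhreads.h.
	 */
	#include <assert.h>
	#include <stdio.h>

	static const unsigned long threads_per_core = 4;

	/* C model of "clrrdi rX,rX,2": clear the low 2 bits of the CPU id. */
	static unsigned long clrrdi_2(unsigned long cpu)
	{
		return cpu & ~0x3UL;
	}

	/* Model of cpu_first_thread_sibling() for the assumed topology. */
	static unsigned long first_thread_sibling(unsigned long cpu)
	{
		return cpu & ~(threads_per_core - 1);
	}

	int main(void)
	{
		for (unsigned long cpu = 0; cpu < 128; cpu++)
			assert(clrrdi_2(cpu) == first_thread_sibling(cpu));

		printf("clrrdi by 2 and cpu_first_thread_sibling() agree for 4 threads/core\n");
		return 0;
	}

Either way, all four threads of a core test and clear the same bit, so one thread's tlbiel loop satisfies the flush request for its siblings.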