path: root/arch/powerpc/kvm/book3s_hv_builtin.c
author	Paul Mackerras <paulus@ozlabs.org>	2017-10-19 14:11:23 +1100
committer	Paul Mackerras <paulus@ozlabs.org>	2017-11-01 15:36:41 +1100
commit	c01015091a77035de1939ef106bfbcaf9a21395f (patch)
tree	6054a2e6b854e487e30d04616f6c9f14e95580fc /arch/powerpc/kvm/book3s_hv_builtin.c
parent	516f7898ae20d9dd902a85522676055a4de9dc9b (diff)
KVM: PPC: Book3S HV: Run HPT guests on POWER9 radix hosts
This patch removes the restriction that a radix host can only run radix guests, allowing us to run HPT (hashed page table) guests as well. This is useful because it provides a way to run old guest kernels that know about POWER8 but not POWER9.

Unfortunately, POWER9 currently has a restriction that all threads in a given core must either all be in HPT mode or all be in radix mode. This means that when entering an HPT guest, we have to obtain control of all 4 threads in the core and get them to switch their LPIDR and LPCR registers, even if they are not going to run a guest. On guest exit we also have to get all threads to switch LPIDR and LPCR back to host values.

To make this feasible, we require that KVM not be in "independent threads" mode, and that the CPU cores be in single-threaded mode from the host kernel's perspective (only thread 0 online; threads 1, 2 and 3 offline). That allows us to use the same code as on POWER8 for obtaining control of the secondary threads.

To manage the LPCR/LPIDR changes required, we extend the kvm_split_mode struct to contain the information needed by the secondary threads. All threads perform a barrier synchronization (where every thread waits for all the other threads to reach the synchronization point) on guest entry, both before and after loading LPCR and LPIDR. On guest exit, they all once again perform a barrier synchronization both before and after loading the host values into LPCR and LPIDR.

Finally, it is also currently necessary to flush the entire TLB every time we enter an HPT guest on a radix host. We do this on thread 0 with a loop of tlbiel instructions.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
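The code added to this file references several new kvm_split_mode fields (lpcr_req, lpidr_req, host_lpcr, do_set, do_restore, napped[] and the lpcr_sync phase bitmap). Their definition lives in the header portion of the patch rather than in this file; the sketch below is only an approximation reconstructed from how the code here uses them, so the exact types and field order are assumptions:

	struct kvm_split_mode {
		/* ... existing fields, including u8 napped[MAX_SMT_THREADS] ... */
		unsigned long	lpcr_req;	/* guest LPCR value the threads should load */
		unsigned long	lpidr_req;	/* guest LPIDR value the threads should load */
		unsigned long	host_lpcr;	/* host LPCR value to restore on exit */
		u32		do_set;		/* nonzero: threads must switch to guest LPCR/LPIDR */
		u32		do_restore;	/* nonzero: threads must restore host LPCR/LPIDR */
		union {
			u32	allphases;	/* all four phase bytes read as one word */
			u8	phase[4];	/* per-thread synchronization phase bits */
		} lpcr_sync;
	};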
Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_builtin.c')
-rw-r--r--	arch/powerpc/kvm/book3s_hv_builtin.c	99
1 file changed, 98 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index e38cc2df6d2a..49a2c7825e04 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -278,7 +278,8 @@ void kvmhv_commence_exit(int trap)
struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
int ptid = local_paca->kvm_hstate.ptid;
struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
- int me, ee, i;
+ int me, ee, i, t;
+ int cpu0;
/* Set our bit in the threads-exiting-guest map in the 0xff00
bits of vcore->entry_exit_map */
@@ -320,6 +321,22 @@ void kvmhv_commence_exit(int trap)
if ((ee >> 8) == 0)
kvmhv_interrupt_vcore(vc, ee);
}
+
+ /*
+ * On POWER9 when running a HPT guest on a radix host (sip != NULL),
+ * we have to interrupt inactive CPU threads to get them to
+ * restore the host LPCR value.
+ */
+ if (sip->lpcr_req) {
+ if (cmpxchg(&sip->do_restore, 0, 1) == 0) {
+ vc = local_paca->kvm_hstate.kvm_vcore;
+ cpu0 = vc->pcpu + ptid - local_paca->kvm_hstate.tid;
+ for (t = 1; t < threads_per_core; ++t) {
+ if (sip->napped[t])
+ kvmhv_rm_send_ipi(cpu0 + t);
+ }
+ }
+ }
}
struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
@@ -619,3 +636,83 @@ void kvmppc_bad_interrupt(struct pt_regs *regs)
die("Bad interrupt in KVM entry/exit code", regs, SIGABRT);
panic("Bad KVM trap");
}
+
+/*
+ * Functions used to switch LPCR HR and UPRT bits on all threads
+ * when entering and exiting HPT guests on a radix host.
+ */
+
+#define PHASE_REALMODE 1 /* in real mode */
+#define PHASE_SET_LPCR 2 /* have set LPCR */
+#define PHASE_OUT_OF_GUEST 4 /* have finished executing in guest */
+#define PHASE_RESET_LPCR 8 /* have reset LPCR to host value */
+
+#define ALL(p) (((p) << 24) | ((p) << 16) | ((p) << 8) | (p))
+
+static void wait_for_sync(struct kvm_split_mode *sip, int phase)
+{
+ int thr = local_paca->kvm_hstate.tid;
+
+ sip->lpcr_sync.phase[thr] |= phase;
+ phase = ALL(phase);
+ while ((sip->lpcr_sync.allphases & phase) != phase) {
+ HMT_low();
+ barrier();
+ }
+ HMT_medium();
+}
+
+void kvmhv_p9_set_lpcr(struct kvm_split_mode *sip)
+{
+ unsigned long rb, set;
+
+ /* wait for every other thread to get to real mode */
+ wait_for_sync(sip, PHASE_REALMODE);
+
+ /* Set LPCR and LPIDR */
+ mtspr(SPRN_LPCR, sip->lpcr_req);
+ mtspr(SPRN_LPID, sip->lpidr_req);
+ isync();
+
+ /* Invalidate the TLB on thread 0 */
+ if (local_paca->kvm_hstate.tid == 0) {
+ sip->do_set = 0;
+ asm volatile("ptesync" : : : "memory");
+ for (set = 0; set < POWER9_TLB_SETS_RADIX; ++set) {
+ rb = TLBIEL_INVAL_SET_LPID +
+ (set << TLBIEL_INVAL_SET_SHIFT);
+ asm volatile(PPC_TLBIEL(%0, %1, 0, 0, 0) : :
+ "r" (rb), "r" (0));
+ }
+ asm volatile("ptesync" : : : "memory");
+ }
+
+ /* indicate that we have done so and wait for others */
+ wait_for_sync(sip, PHASE_SET_LPCR);
+ /* order read of sip->lpcr_sync.allphases vs. sip->do_set */
+ smp_rmb();
+}
+
+/*
+ * Called when a thread that has been in the guest needs
+ * to reload the host LPCR value - but only on POWER9 when
+ * running a HPT guest on a radix host.
+ */
+void kvmhv_p9_restore_lpcr(struct kvm_split_mode *sip)
+{
+ /* we're out of the guest... */
+ wait_for_sync(sip, PHASE_OUT_OF_GUEST);
+
+ mtspr(SPRN_LPID, 0);
+ mtspr(SPRN_LPCR, sip->host_lpcr);
+ isync();
+
+ if (local_paca->kvm_hstate.tid == 0) {
+ sip->do_restore = 0;
+ smp_wmb(); /* order store of do_restore vs. phase */
+ }
+
+ wait_for_sync(sip, PHASE_RESET_LPCR);
+ smp_mb();
+ local_paca->kvm_hstate.kvm_split_mode = NULL;
+}