author    Paul Mackerras <paulus@ozlabs.org>    2017-11-09 14:30:24 +1100
committer Paul Mackerras <paulus@ozlabs.org>    2017-11-09 14:30:24 +1100
commit    072df8130c6b602c8ee219f7b06394680cafad2f (patch)
tree      ad7cbc2342be72236851b19a5d9e8a0908dbbdf7 /arch/powerpc/kvm
parent    c01015091a77035de1939ef106bfbcaf9a21395f (diff)
parent    38c53af853069adf87181684370d7b8866d6387b (diff)
Merge branch 'kvm-ppc-fixes' into kvm-ppc-next
This merges in a couple of fixes from the kvm-ppc-fixes branch that modify the same areas of code as some commits from the kvm-ppc-next branch, in order to resolve the conflicts.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c      10
-rw-r--r--  arch/powerpc/kvm/book3s_64_vio.c         23
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c             49
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S  13
-rw-r--r--  arch/powerpc/kvm/powerpc.c                2
5 files changed, 63 insertions, 34 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 6aec8a22aeff..235319c2574e 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -651,6 +651,16 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
hnow_v = hpte_new_to_old_v(hnow_v, hnow_r);
hnow_r = hpte_new_to_old_r(hnow_r);
}
+
+ /*
+ * If the HPT is being resized, don't update the HPTE,
+ * instead let the guest retry after the resize operation is complete.
+ * The synchronization for mmu_ready test vs. set is provided
+ * by the HPTE lock.
+ */
+ if (!kvm->arch.mmu_ready)
+ goto out_unlock;
+
if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] ||
rev->guest_rpte != hpte[2])
/* HPTE has been changed under us; let the guest retry */
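Note: the hunk above makes kvmppc_book3s_hv_page_fault() re-test kvm->arch.mmu_ready while the HPTE is locked and bail out to out_unlock when an HPT resize is in flight, so the guest simply retries the access once the resize completes. A rough standalone sketch of that pattern, with a pthread mutex standing in for the per-HPTE lock; every name here is illustrative, not kernel code:

#include <pthread.h>
#include <stdbool.h>

enum update_status { UPDATED, RETRY };

struct table_entry {
	pthread_mutex_t lock;		/* stands in for the HPTE lock bit */
	unsigned long value;
};

static bool table_ready;		/* stands in for kvm->arch.mmu_ready */

enum update_status update_entry(struct table_entry *e, unsigned long new_value)
{
	enum update_status st = UPDATED;

	pthread_mutex_lock(&e->lock);
	if (!table_ready) {
		/* Resize in progress: leave the entry untouched and ask
		 * the caller (the guest, in the original code) to retry. */
		st = RETRY;
		goto out_unlock;
	}
	e->value = new_value;
out_unlock:
	pthread_mutex_unlock(&e->lock);
	return st;
}

The point of re-testing the flag under the same lock is that the resize side cannot clear it and tear down the table while an update is in the middle of committing.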
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 8f2da8bba737..4dffa611376d 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -478,28 +478,30 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
return ret;
dir = iommu_tce_direction(tce);
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+
if ((dir != DMA_NONE) && kvmppc_gpa_to_ua(vcpu->kvm,
- tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL))
- return H_PARAMETER;
+ tce & ~(TCE_PCI_READ | TCE_PCI_WRITE), &ua, NULL)) {
+ ret = H_PARAMETER;
+ goto unlock_exit;
+ }
entry = ioba >> stt->page_shift;
list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
- if (dir == DMA_NONE) {
+ if (dir == DMA_NONE)
ret = kvmppc_tce_iommu_unmap(vcpu->kvm,
stit->tbl, entry);
- } else {
- idx = srcu_read_lock(&vcpu->kvm->srcu);
+ else
ret = kvmppc_tce_iommu_map(vcpu->kvm, stit->tbl,
entry, ua, dir);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
- }
if (ret == H_SUCCESS)
continue;
if (ret == H_TOO_HARD)
- return ret;
+ goto unlock_exit;
WARN_ON_ONCE(1);
kvmppc_clear_tce(stit->tbl, entry);
@@ -507,7 +509,10 @@ long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
kvmppc_tce_put(stt, entry, tce);
- return H_SUCCESS;
+unlock_exit:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
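Note: the kvmppc_h_put_tce() hunks widen the SRCU read-side section so it covers both the kvmppc_gpa_to_ua() lookup and the whole iommu_tables loop, and they replace the early returns with a single unlock_exit label so the lock is always dropped on the way out. A minimal sketch of that single-exit shape, with a pthread rwlock standing in for SRCU and made-up lookup()/map_one() stubs:

#include <pthread.h>

#define H_SUCCESS   0L
#define H_PARAMETER (-4L)

static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;

static int lookup(unsigned long gpa, unsigned long *ua) { *ua = gpa; return 0; }
static long map_one(int i, unsigned long ua) { (void)i; (void)ua; return H_SUCCESS; }

long put_entry(unsigned long gpa, int ntables)
{
	long ret = H_SUCCESS;
	unsigned long ua;
	int i;

	pthread_rwlock_rdlock(&lock);		/* was: srcu_read_lock() per loop iteration */

	if (lookup(gpa, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;		/* no early return while the lock is held */
	}

	for (i = 0; i < ntables; i++) {
		ret = map_one(i, ua);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

unlock_exit:
	pthread_rwlock_unlock(&lock);		/* one unlock covers every exit path */
	return ret;
}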
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index fff62fdf1464..ca0d4d938d6a 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -2717,11 +2717,13 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
* Hard-disable interrupts, and check resched flag and signals.
* If we need to reschedule or deliver a signal, clean up
* and return without going into the guest(s).
+ * If the mmu_ready flag has been cleared, don't go into the
+ * guest because that means a HPT resize operation is in progress.
*/
local_irq_disable();
hard_irq_disable();
if (lazy_irq_pending() || need_resched() ||
- recheck_signals(&core_info)) {
+ recheck_signals(&core_info) || !vc->kvm->arch.mmu_ready) {
local_irq_enable();
vc->vcore_state = VCORE_INACTIVE;
/* Unlock all except the primary vcore */
@@ -3120,7 +3122,7 @@ out:
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
- int n_ceded, i;
+ int n_ceded, i, r;
struct kvmppc_vcore *vc;
struct kvm_vcpu *v;
@@ -3174,6 +3176,30 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
!signal_pending(current)) {
+ /* See if the MMU is ready to go */
+ if (!vcpu->kvm->arch.mmu_ready) {
+ spin_unlock(&vc->lock);
+ mutex_lock(&vcpu->kvm->lock);
+ r = 0;
+ if (!vcpu->kvm->arch.mmu_ready) {
+ if (!kvm_is_radix(vcpu->kvm))
+ r = kvmppc_hv_setup_htab_rma(vcpu);
+ if (!r) {
+ if (cpu_has_feature(CPU_FTR_ARCH_300))
+ kvmppc_setup_partition_table(vcpu->kvm);
+ vcpu->kvm->arch.mmu_ready = 1;
+ }
+ }
+ mutex_unlock(&vcpu->kvm->lock);
+ spin_lock(&vc->lock);
+ if (r) {
+ kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
+ kvm_run->fail_entry.hardware_entry_failure_reason = 0;
+ vcpu->arch.ret = r;
+ break;
+ }
+ }
+
if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
kvmppc_vcore_end_preempt(vc);
@@ -3293,24 +3319,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */
smp_mb();
- /* On the first time here, set up MMU if necessary */
- if (!vcpu->kvm->arch.mmu_ready) {
- mutex_lock(&kvm->lock);
- r = 0;
- if (!kvm->arch.mmu_ready) {
- if (!kvm_is_radix(vcpu->kvm))
- r = kvmppc_hv_setup_htab_rma(vcpu);
- if (!r) {
- if (cpu_has_feature(CPU_FTR_ARCH_300))
- kvmppc_setup_partition_table(kvm);
- kvm->arch.mmu_ready = 1;
- }
- }
- mutex_unlock(&kvm->lock);
- if (r)
- goto out;
- }
-
flush_all_to_thread(current);
/* Save userspace EBB and other register values */
@@ -3358,7 +3366,6 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
mtspr(SPRN_VRSAVE, user_vrsave);
- out:
vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
atomic_dec(&vcpu->kvm->arch.vcpus_running);
return r;
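Note: in book3s_hv.c the one-time MMU setup moves out of kvmppc_vcpu_run_hv() and into the kvmppc_run_vcpu() loop, so whenever mmu_ready has been cleared (for example by an HPT resize) the vcpu drops vc->lock, takes kvm->lock, re-tests the flag, performs the setup if it is still clear, and only then re-enters the guest; kvmppc_run_core() likewise refuses to enter the guest while the flag is clear. A compact sketch of that double-checked setup-under-mutex pattern, with illustrative names only:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of kvm->lock */
static pthread_mutex_t run_lock = PTHREAD_MUTEX_INITIALIZER;	/* plays the role of vc->lock  */
static bool mmu_ready;

static int do_setup(void) { return 0; }		/* hypothetical setup step, may fail */

/* Called with run_lock held, as the original loop holds vc->lock. */
int ensure_ready(void)
{
	int r = 0;

	if (mmu_ready)
		return 0;

	pthread_mutex_unlock(&run_lock);
	pthread_mutex_lock(&big_lock);
	if (!mmu_ready) {			/* re-check: another thread may have done it */
		r = do_setup();
		if (!r)
			mmu_ready = true;
	}
	pthread_mutex_unlock(&big_lock);
	pthread_mutex_lock(&run_lock);
	return r;	/* non-zero: fail the run, like KVM_EXIT_FAIL_ENTRY above */
}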
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 7add18930e6d..2659844784b8 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1025,13 +1025,14 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
beq no_xive
ld r11, VCPU_XIVE_SAVED_STATE(r4)
li r9, TM_QW1_OS
- stdcix r11,r9,r10
eieio
+ stdcix r11,r9,r10
lwz r11, VCPU_XIVE_CAM_WORD(r4)
li r9, TM_QW1_OS + TM_WORD2
stwcix r11,r9,r10
li r9, 1
stw r9, VCPU_XIVE_PUSHED(r4)
+ eieio
no_xive:
#endif /* CONFIG_KVM_XICS */
@@ -1346,6 +1347,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
bne 3f
BEGIN_FTR_SECTION
PPC_MSGSYNC
+ lwsync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
@@ -1436,8 +1438,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
- lwzx r11, r7, r10
eieio
+ lwzx r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldx r11, r6, r10
b 3f
@@ -1445,8 +1447,8 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
cmpldi cr0, r10, 0
beq 1f
/* First load to pull the context, we ignore the value */
- lwzcix r11, r7, r10
eieio
+ lwzcix r11, r7, r10
/* Second load to recover the context state (Words 0 and 1) */
ldcix r11, r6, r10
3: std r11, VCPU_XIVE_SAVED_STATE(r9)
@@ -1456,6 +1458,7 @@ guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
stw r10, VCPU_XIVE_PUSHED(r9)
stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
+ eieio
1:
#endif /* CONFIG_KVM_XICS */
/* Save more register state */
@@ -2838,6 +2841,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
PPC_MSGCLR(6)
/* see if it's a host IPI */
li r3, 1
+BEGIN_FTR_SECTION
+ PPC_MSGSYNC
+ lwsync
+END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
lbz r0, HSTATE_HOST_IPI(r13)
cmpwi r0, 0
bnelr
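Note: the book3s_hv_rmhandlers.S hunks are ordering fixes. The eieio barriers are moved so they precede the cache-inhibited XIVE stores and loads, an extra eieio follows the VCPU_XIVE_PUSHED updates, and PPC_MSGSYNC plus lwsync are added after a doorbell is taken, before the HSTATE_HOST_IPI load. As a loose, generic analogy only (C11 release/acquire, not the actual PPC barrier semantics), the underlying idea of ordering data accesses against the flag that advertises them looks like this:

#include <stdatomic.h>

static int payload;			/* analogue of the saved XIVE state       */
static atomic_int pushed;		/* analogue of the VCPU_XIVE_PUSHED flag  */

void publisher(void)
{
	payload = 42;					/* write the data first      */
	atomic_store_explicit(&pushed, 1,
			      memory_order_release);	/* then publish the flag     */
}

int consumer(void)
{
	if (atomic_load_explicit(&pushed,
				 memory_order_acquire))	/* observe the flag first... */
		return payload;				/* ...then read the data     */
	return -1;
}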
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a0b7f094de78..6b6c53c42ac9 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -643,7 +643,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
break;
#endif
case KVM_CAP_PPC_HTM:
- r = is_kvmppc_hv_enabled(kvm) &&
+ r = hv_enabled &&
(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM_COMP);
break;
default:
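Note: the powerpc.c one-liner switches the KVM_CAP_PPC_HTM check from is_kvmppc_hv_enabled(kvm) to the file-scope hv_enabled flag; the likely motivation is that the capability can also be queried on the /dev/kvm fd, where kvm is NULL and a helper that dereferences it would crash. A tiny sketch of that distinction, with illustrative names rather than the KVM API:

#include <stdbool.h>
#include <stddef.h>

struct vm;				/* opaque; may be NULL for global capability queries */

static bool hv_enabled;			/* analogue of the cached file-scope hv_enabled flag */

long check_cap_htm(const struct vm *vm, bool cpu_has_htm)
{
	/* vm can be NULL when the capability is queried without a VM, so
	 * test the cached flag instead of a helper that dereferences vm. */
	(void)vm;
	return hv_enabled && cpu_has_htm;
}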