author	Lai Jiangshan <laijs@linux.alibaba.com>	2021-10-19 19:01:53 +0800
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-10-22 05:43:21 -0400
commit	264d3dc1d3dca13b7eaf0c4fa7a4b2c91a5e056a (patch)
tree	9287dedd4681a9118d5d4f4c4326845e1e4e76eb /arch/x86/kvm/mmu
parent	509bfe3d979672cd69c318d520420cf95b474fd9 (diff)
KVM: X86: pair smp_wmb() of mmu_try_to_unsync_pages() with smp_rmb()
The commit 578e1c4db2213 ("kvm: x86: Avoid taking MMU lock in
kvm_mmu_sync_roots if no sync is needed") added smp_wmb() in
mmu_try_to_unsync_pages(), but the corresponding smp_load_acquire() isn't
used on the load of SPTE.W. smp_load_acquire() orders _subsequent_ loads
after sp->is_unsync; it does not order _earlier_ loads before the load of
sp->is_unsync.

This has no functional change; smp_rmb() is a NOP on x86, and no compiler
barrier is required because there is a VMEXIT between the load of SPTE.W
and kvm_mmu_sync_roots.

Cc: Junaid Shahid <junaids@google.com>
Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
Message-Id: <20211019110154.4091-4-jiangshanlai@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
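For illustration only (not part of the patch): the ordering the message relies on is the classic
message-passing pattern, with SPTE.W as the "flag" and sp->unsync as the "data". Below is a
minimal userspace sketch using C11 atomics and pthreads; the names spte_w and unsync are invented
stand-ins, and the thread fences model smp_wmb()/smp_rmb().

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool unsync;	/* stand-in for sp->unsync (the "data") */
static atomic_bool spte_w;	/* stand-in for SPTE.W     (the "flag") */

static void *writer(void *arg)
{
	(void)arg;
	/* 1.1: mark the shadow page unsync. */
	atomic_store_explicit(&unsync, true, memory_order_relaxed);
	/* smp_wmb() analogue: order 1.1 before 1.2. */
	atomic_thread_fence(memory_order_release);
	/* 1.2: make the SPTE writable. */
	atomic_store_explicit(&spte_w, true, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	/* 2.1: the page-table walk observes SPTE.W = 1. */
	if (atomic_load_explicit(&spte_w, memory_order_relaxed)) {
		/*
		 * smp_rmb() analogue: orders the 2.1 load before the 2.3
		 * load below, pairing with the writer's fence. An acquire
		 * on the unsync load instead would only order loads issued
		 * after it, not the earlier 2.1 load.
		 */
		atomic_thread_fence(memory_order_acquire);
		/* 2.3: must now observe unsync == true. */
		if (!atomic_load_explicit(&unsync, memory_order_relaxed))
			puts("ordering violated: saw SPTE.W=1 but unsync=0");
	}
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Replacing the reader's acquire fence with an acquire load of unsync alone would order only loads
issued after that load, leaving the earlier spte_w load unordered; that is the gap the smp_rmb()
in is_unsync_root() closes. (Compile the sketch with cc -std=c11 -pthread.)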
Diffstat (limited to 'arch/x86/kvm/mmu')
-rw-r--r--	arch/x86/kvm/mmu/mmu.c	41
1 file changed, 27 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index f9f228963088..cb7622e93419 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2669,8 +2669,8 @@ int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
 	 *     (sp->unsync = true)
 	 *
 	 * The write barrier below ensures that 1.1 happens before 1.2 and thus
-	 * the situation in 2.4 does not arise. The implicit barrier in 2.2
-	 * pairs with this write barrier.
+	 * the situation in 2.4 does not arise. It pairs with the read barrier
+	 * in is_unsync_root(), placed between 2.1's load of SPTE.W and 2.3.
 	 */
 	smp_wmb();
 
@@ -3643,6 +3643,30 @@ err_pml4:
 #endif
 }
 
+static bool is_unsync_root(hpa_t root)
+{
+	struct kvm_mmu_page *sp;
+
+	/*
+	 * The read barrier orders the CPU's read of SPTE.W during the page table
+	 * walk before the reads of sp->unsync/sp->unsync_children here.
+	 *
+	 * Even if another CPU was marking the SP as unsync-ed simultaneously,
+	 * any guest page table changes are not guaranteed to be visible anyway
+	 * until this VCPU issues a TLB flush strictly after those changes are
+	 * made. We only need to ensure that the other CPU sets these flags
+	 * before any actual changes to the page tables are made. The comments
+	 * in mmu_try_to_unsync_pages() describe what could go wrong if this
+	 * requirement isn't satisfied.
+	 */
+	smp_rmb();
+	sp = to_shadow_page(root);
+	if (sp->unsync || sp->unsync_children)
+		return true;
+
+	return false;
+}
+
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 {
 	int i;
@@ -3660,18 +3684,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 		hpa_t root = vcpu->arch.mmu->root_hpa;
 		sp = to_shadow_page(root);
 
-		/*
-		 * Even if another CPU was marking the SP as unsync-ed
-		 * simultaneously, any guest page table changes are not
-		 * guaranteed to be visible anyway until this VCPU issues a TLB
-		 * flush strictly after those changes are made. We only need to
-		 * ensure that the other CPU sets these flags before any actual
-		 * changes to the page tables are made. The comments in
-		 * mmu_try_to_unsync_pages() describe what could go wrong if
-		 * this requirement isn't satisfied.
-		 */
-		if (!smp_load_acquire(&sp->unsync) &&
-		    !smp_load_acquire(&sp->unsync_children))
+		if (!is_unsync_root(root))
 			return;
 
 		write_lock(&vcpu->kvm->mmu_lock);