author     Mark Brown <broonie@kernel.org>	2022-09-13 17:43:23 +0100
committer  Mark Brown <broonie@kernel.org>	2022-09-13 17:43:23 +0100
commit     ca9b8f0486b577ab179664d4a279090645e9244f (patch)
tree       f1e926d374b793470c1ce5a2e9628ab54835e007 /virt/kvm/pfncache.c
parent     69a673c9e54d952cf404f80169d3100b7a9645bb (diff)
parent     1cc5a52e873a4f9725eafe5aa9cd213b7b58e29e (diff)
MediaTek Helio X10 MT6795 - MT6331/6332 Regulators
Merge series from AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>:

In an effort to give some love to the apparently forgotten MT6795 SoC, I am
upstreaming more of the components needed to support platforms powered by
this SoC beyond a simple boot to serial console.

This series adds support for the regulators found in the MT6331 and MT6332
main/companion PMICs.

Support for each driver in each subsystem is added in separate patch series,
so as to avoid spamming uninteresting patches to maintainers.

Tested on a MT6795 Sony Xperia M5 (codename "Holly") smartphone.
Diffstat (limited to 'virt/kvm/pfncache.c')
-rw-r--r--  virt/kvm/pfncache.c | 17
1 file changed, 9 insertions(+), 8 deletions(-)
diff --git a/virt/kvm/pfncache.c b/virt/kvm/pfncache.c
index ab519f72f2cd..68ff41d39545 100644
--- a/virt/kvm/pfncache.c
+++ b/virt/kvm/pfncache.c
@@ -112,27 +112,28 @@ static inline bool mmu_notifier_retry_cache(struct kvm *kvm, unsigned long mmu_s
 {
 	/*
 	 * mn_active_invalidate_count acts for all intents and purposes
-	 * like mmu_notifier_count here; but the latter cannot be used
-	 * here because the invalidation of caches in the mmu_notifier
-	 * event occurs _before_ mmu_notifier_count is elevated.
+	 * like mmu_invalidate_in_progress here; but the latter cannot
+	 * be used here because the invalidation of caches in the
+	 * mmu_notifier event occurs _before_ mmu_invalidate_in_progress
+	 * is elevated.
 	 *
 	 * Note, it does not matter that mn_active_invalidate_count
 	 * is not protected by gpc->lock. It is guaranteed to
 	 * be elevated before the mmu_notifier acquires gpc->lock, and
-	 * isn't dropped until after mmu_notifier_seq is updated.
+	 * isn't dropped until after mmu_invalidate_seq is updated.
 	 */
 	if (kvm->mn_active_invalidate_count)
 		return true;
 
 	/*
 	 * Ensure mn_active_invalidate_count is read before
-	 * mmu_notifier_seq. This pairs with the smp_wmb() in
+	 * mmu_invalidate_seq. This pairs with the smp_wmb() in
 	 * mmu_notifier_invalidate_range_end() to guarantee either the
 	 * old (non-zero) value of mn_active_invalidate_count or the
-	 * new (incremented) value of mmu_notifier_seq is observed.
+	 * new (incremented) value of mmu_invalidate_seq is observed.
 	 */
 	smp_rmb();
-	return kvm->mmu_notifier_seq != mmu_seq;
+	return kvm->mmu_invalidate_seq != mmu_seq;
 }
 
 static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
@@ -155,7 +156,7 @@ static kvm_pfn_t hva_to_pfn_retry(struct kvm *kvm, struct gfn_to_pfn_cache *gpc)
 	gpc->valid = false;
 
 	do {
-		mmu_seq = kvm->mmu_notifier_seq;
+		mmu_seq = kvm->mmu_invalidate_seq;
 		smp_rmb();
 
 		write_unlock_irq(&gpc->lock);
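
To make the ordering that these comments describe concrete, here is a minimal, self-contained userspace sketch of the same retry pattern. Everything in it is illustrative: struct fake_kvm, invalidate(), retry_needed() and cache_refresh() are stand-ins rather than the real KVM code, and C11 atomic fences approximate the kernel's smp_rmb()/smp_wmb().

/*
 * Standalone sketch of the invalidation-sequence retry pattern shown in
 * the hunks above.  Not KVM code: the names and the C11 fences are
 * illustrative approximations of the kernel primitives.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_kvm {
	/* Elevated while an invalidation callback is in flight. */
	atomic_ulong mn_active_invalidate_count;
	/* Bumped once per completed invalidation. */
	atomic_ulong mmu_invalidate_seq;
};

/* Writer side: what an mmu_notifier-style invalidation would do. */
static void invalidate(struct fake_kvm *kvm)
{
	atomic_fetch_add(&kvm->mn_active_invalidate_count, 1);
	/* ... zap cached translations here ... */
	atomic_fetch_add(&kvm->mmu_invalidate_seq, 1);
	/* Publish the new seq before dropping the in-progress count (~ smp_wmb()). */
	atomic_thread_fence(memory_order_release);
	atomic_fetch_sub(&kvm->mn_active_invalidate_count, 1);
}

/*
 * Reader side, mirroring mmu_notifier_retry_cache(): returns true if an
 * invalidation ran (or is still running) since @mmu_seq was sampled.
 */
static bool retry_needed(struct fake_kvm *kvm, unsigned long mmu_seq)
{
	if (atomic_load(&kvm->mn_active_invalidate_count))
		return true;
	/* Order the count read before the seq read (~ smp_rmb()). */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load(&kvm->mmu_invalidate_seq) != mmu_seq;
}

/* Mirrors the hva_to_pfn_retry() loop: snapshot, do the work, recheck. */
static void cache_refresh(struct fake_kvm *kvm)
{
	unsigned long mmu_seq;

	do {
		mmu_seq = atomic_load(&kvm->mmu_invalidate_seq);
		atomic_thread_fence(memory_order_acquire);	/* ~ smp_rmb() */
		/* ... translate and repopulate the cache here ... */
	} while (retry_needed(kvm, mmu_seq));
}

int main(void)
{
	struct fake_kvm kvm = { 0 };

	invalidate(&kvm);
	cache_refresh(&kvm);
	printf("seq after one invalidation: %lu\n",
	       (unsigned long)atomic_load(&kvm.mmu_invalidate_seq));
	return 0;
}

The design point mirrored here is the one the comment spells out: a refresher either observes a non-zero mn_active_invalidate_count or the already-incremented mmu_invalidate_seq, so a concurrent invalidation can never slip past the final check unnoticed.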