author    Julien Grall <julien.grall@arm.com>    2021-11-22 12:18:43 +0000
committer Marc Zyngier <maz@kernel.org>    2022-02-08 14:57:03 +0000
commit    3248136b3637e1671e4fa46e32e2122f9ec4bc3d (patch)
tree      f73f39b63f7e6ecf9800b7a796cc5a0b0b186949 /arch/arm64/include/asm/kvm_mmu.h
parent    f8051e960922a9de8e42159103d5d9c697ef17ec (diff)
KVM: arm64: Align the VMID allocation with the arm64 ASID
At the moment, the VMID algorithm sends an SGI to all the CPUs to force an exit and then broadcasts a full TLB flush and I-Cache invalidation.

This patch uses the new VMID allocator. The benefits are:
  - Aligns with the arm64 ASID algorithm.
  - CPUs are not forced to exit at roll-over. Instead, the VMID is marked reserved and context invalidation is broadcast. This reduces IPI traffic.
  - More flexibility for adding support for pinned KVM VMIDs in the future.

With the new algorithm, the code is adapted as follows:
  - The call to update_vmid() is now done with preemption disabled, as the new algorithm needs to store information per CPU.

Signed-off-by: Julien Grall <julien.grall@arm.com>
Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20211122121844.867-4-shameerali.kolothum.thodi@huawei.com
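The roll-over behaviour described above can be pictured with a small, self-contained userspace sketch. This is not the kernel's VMID allocator: every name in it (new_gen_rollover(), get_id(), the 4-bit ID space, the two-CPU array) is invented for illustration. It only models the key idea: when the ID space wraps, IDs still live on some CPU are carried over as "reserved" for the new generation instead of forcing every CPU to exit, and only a context invalidation would need to be broadcast at that point.

/*
 * Illustrative sketch only -- not the arm64 VMID allocator.
 * A generation counter lives in the bits above the ID; an ID is valid
 * only if its generation matches the current one.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ID_BITS      4                       /* tiny ID space for the demo */
#define NUM_IDS      (1u << ID_BITS)
#define GEN_MASK     (~(uint64_t)(NUM_IDS - 1))
#define NR_CPUS_DEMO 2

static uint64_t current_gen = NUM_IDS;        /* generation in the high bits */
static uint8_t  in_use[NUM_IDS];              /* IDs handed out this generation */
static uint64_t active_on_cpu[NR_CPUS_DEMO];  /* ID currently loaded on each CPU */

static void new_gen_rollover(void)
{
	/* Start a new generation; IDs still live on a CPU stay reserved,
	 * so no CPU has to be interrupted. A real allocator would queue
	 * a TLB/I-cache invalidation before the next guest entry. */
	current_gen += NUM_IDS;
	memset(in_use, 0, sizeof(in_use));
	for (int cpu = 0; cpu < NR_CPUS_DEMO; cpu++) {
		uint64_t id = active_on_cpu[cpu] & (NUM_IDS - 1);
		in_use[id] = 1;
		active_on_cpu[cpu] = current_gen | id;
	}
}

static uint64_t get_id(uint64_t *ctx_id, int cpu)
{
	/* Fast path: the context already owns an ID of the current generation. */
	if ((*ctx_id & GEN_MASK) == current_gen) {
		active_on_cpu[cpu] = *ctx_id;
		return *ctx_id;
	}

	/* Slow path: pick a free slot, rolling the generation over if needed. */
	for (;;) {
		for (uint64_t id = 1; id < NUM_IDS; id++) {
			if (!in_use[id]) {
				in_use[id] = 1;
				*ctx_id = current_gen | id;
				active_on_cpu[cpu] = *ctx_id;
				return *ctx_id;
			}
		}
		new_gen_rollover();
	}
}

int main(void)
{
	uint64_t vm_a = 0, vm_b = 0;

	printf("vm_a id: %#llx\n", (unsigned long long)get_id(&vm_a, 0));
	printf("vm_b id: %#llx\n", (unsigned long long)get_id(&vm_b, 1));
	return 0;
}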
Diffstat (limited to 'arch/arm64/include/asm/kvm_mmu.h')
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 81839e9a8a24..74735a864eee 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -115,6 +115,7 @@ alternative_cb_end
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
+#include <asm/kvm_host.h>
void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -266,7 +267,8 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;
baddr = mmu->pgd_phys;
- vmid_field = (u64)READ_ONCE(vmid->vmid) << VTTBR_VMID_SHIFT;
+ vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
+ vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
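For reference, a standalone sketch of what the changed hunk computes. make_vttbr() and the constants below are illustrative stand-ins, not the kernel definitions: the real shift and mask macros live in asm/kvm_arm.h (VMID field starting at bit 48 of VTTBR_EL2, CnP in bit 0), and the baddr encoding performed by kvm_phys_to_vttbr() is simplified here to a plain OR.

/* Illustration only: compose a VTTBR_EL2-style value from the stage-2
 * pgd address, the VMID read from the allocator, and the CnP bit. */
#include <stdint.h>
#include <stdio.h>

#define VTTBR_VMID_SHIFT      48ULL
#define VTTBR_VMID_MASK(bits) ((((uint64_t)1 << (bits)) - 1) << VTTBR_VMID_SHIFT)
#define VTTBR_CNP_BIT         1ULL

static uint64_t make_vttbr(uint64_t pgd_phys, uint64_t id,
			   unsigned int vmid_bits, int has_cnp)
{
	uint64_t vmid_field = id << VTTBR_VMID_SHIFT;

	/* The allocator hands back a generation+VMID pair in 'id'; the mask
	 * keeps only the low vmid_bits of it in the VTTBR VMID field. */
	vmid_field &= VTTBR_VMID_MASK(vmid_bits);

	return pgd_phys | vmid_field | (has_cnp ? VTTBR_CNP_BIT : 0);
}

int main(void)
{
	/* e.g. generation bits 0x30000 plus VMID 0x2a, with 16-bit VMIDs */
	printf("%#llx\n", (unsigned long long)
	       make_vttbr(0x40001000ULL, 0x3002aULL, 16, 0));
	return 0;
}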