author	Oliver Upton <oliver.upton@linux.dev>	2023-02-13 22:33:10 +0000
committer	Oliver Upton <oliver.upton@linux.dev>	2023-02-13 22:33:10 +0000
commit	52b603628a2c06be38648aa025daa81e4355a7fc (patch)
tree	cdaf10e6f49c254f45be23e0b7888c114cad1972 /arch/arm64/kvm/mmu.c
parent	e8789ab7047a824fa415948d511634fea0c3aea3 (diff)
parent	1dfc3e905089a0bcada268fb5691a605655e0319 (diff)
Merge branch kvm-arm64/parallel-access-faults into kvmarm/next
* kvm-arm64/parallel-access-faults:
  : Parallel stage-2 access fault handling
  :
  : The parallel faults changes that went into 6.2 covered most stage-2
  : aborts, with the exception of stage-2 access faults. Building on top of
  : the new infrastructure, this series adds support for handling access
  : faults (i.e. updating the access flag) in parallel.
  :
  : This is expected to provide a performance uplift for cores that do not
  : implement FEAT_HAFDBS, such as those from the fruit company.
  KVM: arm64: Condition HW AF updates on config option
  KVM: arm64: Handle access faults behind the read lock
  KVM: arm64: Don't serialize if the access flag isn't set
  KVM: arm64: Return EAGAIN for invalid PTE in attr walker
  KVM: arm64: Ignore EAGAIN for walks outside of a fault
  KVM: arm64: Use KVM's pte type/helpers in handle_access_fault()

Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
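To make the end state concrete, here is a minimal sketch of handle_access_fault() as it looks after this series, mirroring the second hunk below; the surrounding KVM/arm64 declarations (kvm_pte_t, the mmu_lock rwlock, trace_kvm_access_fault(), kvm_pgtable_stage2_mkyoung(), kvm_pte_valid(), kvm_pte_to_pfn(), kvm_set_pfn_accessed()) are assumed from the kernel tree rather than reproduced here:

    /* Resolve the access fault by making the page young again. */
    static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
    {
    	kvm_pte_t pte;
    	struct kvm_s2_mmu *mmu;

    	trace_kvm_access_fault(fault_ipa);

    	/* The young-bit update now runs under the read side of mmu_lock. */
    	read_lock(&vcpu->kvm->mmu_lock);
    	mmu = vcpu->arch.hw_mmu;
    	pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
    	read_unlock(&vcpu->kvm->mmu_lock);

    	/* Only mark the pfn accessed if the walk found a valid PTE. */
    	if (kvm_pte_valid(pte))
    		kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
    }

The access-flag update is advisory, so per the series it can safely race under the read lock rather than serializing on the write lock.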
Diffstat (limited to 'arch/arm64/kvm/mmu.c')
-rw-r--r--	arch/arm64/kvm/mmu.c	18
1 file changed, 9 insertions, 9 deletions
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 01352f5838a0..cfc7777fa490 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1383,7 +1383,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	else
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
 					     __pfn_to_phys(pfn), prot,
-					     memcache, KVM_PGTABLE_WALK_SHARED);
+					     memcache,
+					     KVM_PGTABLE_WALK_HANDLE_FAULT |
+					     KVM_PGTABLE_WALK_SHARED);
 
 	/* Mark the page dirty only if the fault is handled successfully */
 	if (writable && !ret) {
@@ -1401,20 +1403,18 @@ out_unlock:
 /* Resolve the access fault by making the page young again. */
 static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
 {
-	pte_t pte;
-	kvm_pte_t kpte;
+	kvm_pte_t pte;
 	struct kvm_s2_mmu *mmu;
 
 	trace_kvm_access_fault(fault_ipa);
 
-	write_lock(&vcpu->kvm->mmu_lock);
+	read_lock(&vcpu->kvm->mmu_lock);
 	mmu = vcpu->arch.hw_mmu;
-	kpte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
-	write_unlock(&vcpu->kvm->mmu_lock);
+	pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa);
+	read_unlock(&vcpu->kvm->mmu_lock);
 
-	pte = __pte(kpte);
-	if (pte_valid(pte))
-		kvm_set_pfn_accessed(pte_pfn(pte));
+	if (kvm_pte_valid(pte))
+		kvm_set_pfn_accessed(kvm_pte_to_pfn(pte));
 }
 
 /**