Diffstat (limited to 'arch/arm64/include/asm/kvm_mmu.h')
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	74
1 file changed, 72 insertions, 2 deletions
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index d5e48d870461..2dc5e6e742bb 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -49,7 +49,7 @@
  * mappings, and none of this applies in that case.
  */
 
-#ifdef __ASSEMBLY__
+#ifdef __ASSEMBLER__
 
 #include <asm/alternative.h>
 
@@ -98,6 +98,7 @@ alternative_cb_end
 #include <asm/mmu_context.h>
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
+#include <asm/kvm_nested.h>
 
 void kvm_update_va_mask(struct alt_instr *alt,
 			__le32 *origptr, __le32 *updptr, int nr_inst);
@@ -138,6 +139,8 @@ static __always_inline unsigned long __kern_hyp_va(unsigned long v)
 
 #define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
 
+extern u32 __hyp_va_bits;
+
 /*
  * We currently support using a VM-specified IPA size. For backward
  * compatibility, the default IPA size is fixed to 40bits.
@@ -165,6 +168,11 @@ int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
 int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
 void __init free_hyp_pgds(void);
 
+void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
+			    u64 size, bool may_block);
+void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
+void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
+
 void stage2_unmap_vm(struct kvm *kvm);
 int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
 void kvm_uninit_stage2_mmu(struct kvm *kvm);
@@ -172,6 +180,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
 int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
 			  phys_addr_t pa, unsigned long size, bool writable);
 
+int kvm_handle_guest_sea(struct kvm_vcpu *vcpu);
 int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);
 
 phys_addr_t kvm_mmu_get_httbr(void);
@@ -326,5 +335,66 @@ static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
 	return container_of(mmu->arch, struct kvm, arch);
 }
-#endif /* __ASSEMBLY__ */
+
+static inline u64 get_vmid(u64 vttbr)
+{
+	return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
+		VTTBR_VMID_SHIFT;
+}
+
+static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
+{
+	return !(mmu->tlb_vttbr & VTTBR_CNP_BIT);
+}
+
+static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
+{
+	/*
+	 * Be careful, mmu may not be fully initialised so do not look at
+	 * *any* of its fields.
+	 */
+	return &kvm->arch.mmu != mmu;
+}
+
+static inline void kvm_fault_lock(struct kvm *kvm)
+{
+	if (is_protected_kvm_enabled())
+		write_lock(&kvm->mmu_lock);
+	else
+		read_lock(&kvm->mmu_lock);
+}
+
+static inline void kvm_fault_unlock(struct kvm *kvm)
+{
+	if (is_protected_kvm_enabled())
+		write_unlock(&kvm->mmu_lock);
+	else
+		read_unlock(&kvm->mmu_lock);
+}
+
+/*
+ * ARM64 KVM relies on a simple conversion from physaddr to a kernel
+ * virtual address (KVA) when it does cache maintenance as the CMO
+ * instructions work on virtual addresses. This is incompatible with
+ * VM_PFNMAP VMAs which may not have a kernel direct mapping to a
+ * virtual address.
+ *
+ * With S2FWB and CACHE DIC features, KVM need not do cache flushing
+ * and CMOs are NOP'd. This has the effect of no longer requiring a
+ * KVA for addresses mapped into the S2. The presence of these features
+ * is thus necessary to support cacheable S2 mapping of VM_PFNMAP.
+ */
+static inline bool kvm_supports_cacheable_pfnmap(void)
+{
+	return cpus_have_final_cap(ARM64_HAS_STAGE2_FWB) &&
+	       cpus_have_final_cap(ARM64_HAS_CACHE_DIC);
+}
+
+#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
+void kvm_s2_ptdump_create_debugfs(struct kvm *kvm);
+#else
+static inline void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) {}
+#endif /* CONFIG_PTDUMP_STAGE2_DEBUGFS */
+
+#endif /* __ASSEMBLER__ */
 #endif /* __ARM64_KVM_MMU_H__ */
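
For context on how the new helpers fit together: the comment above kvm_supports_cacheable_pfnmap() and the asymmetric locking in kvm_fault_lock()/kvm_fault_unlock() suggest fault-path usage along the following lines. This is a hedged sketch, not code from the patch: handle_pfnmap_fault() and its surrounding logic are hypothetical, loosely modelled on the stage-2 abort handling in arch/arm64/kvm/mmu.c.

/*
 * Hypothetical sketch (not part of the diff above): how a stage-2
 * fault handler might consume the helpers added by this patch.
 */
static int handle_pfnmap_fault(struct kvm_vcpu *vcpu, phys_addr_t ipa,
			       struct vm_area_struct *vma)
{
	struct kvm *kvm = vcpu->kvm;

	/*
	 * A cacheable stage-2 mapping of VM_PFNMAP memory is only safe
	 * when no CMOs are required: S2FWB forces coherent stage-2
	 * memory attributes and CACHE DIC elides I-cache maintenance,
	 * so KVM never needs a kernel alias of the PFN.
	 */
	if ((vma->vm_flags & VM_PFNMAP) && !kvm_supports_cacheable_pfnmap())
		return -EINVAL;

	/*
	 * Protected-mode KVM serialises stage-2 updates under the
	 * write lock; otherwise faults may proceed concurrently under
	 * the read lock.
	 */
	kvm_fault_lock(kvm);
	/* ... resolve the fault and install the mapping at 'ipa' ... */
	kvm_fault_unlock(kvm);

	return 0;
}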
