author     Marc Zyngier <maz@kernel.org>    2023-08-28 09:30:32 +0100
committer  Marc Zyngier <maz@kernel.org>    2023-08-28 09:30:32 +0100
commit     1f66f1246bfa08aaf13db897736de49cbeaf72a1 (patch)
tree       2bd184a40c479a16fbee43d29e423c36bc89ee37 /arch/arm64/include/asm/kvm_host.h
parent     50a40ff7d311da4c28e7f9d5e9113b4fab3b7b8e (diff)
parent     f156a7d13fc35d0078cd644b8cf0a6f97cbbe2e2 (diff)
Merge branch kvm-arm64/6.6/misc into kvmarm-master/next
* kvm-arm64/6.6/misc:
  : .
  : Misc KVM/arm64 updates for 6.6:
  :
  : - Don't unnecessarily align non-stack allocations in the EL2 VA space
  :
  : - Drop HCR_VIRT_EXCP_MASK, which was never used...
  :
  : - Don't use smp_processor_id() in kvm_arch_vcpu_load(),
  :   but the cpu parameter instead
  :
  : - Drop redundant call to kvm_set_pfn_accessed() in user_mem_abort()
  :
  : - Remove prototypes without implementations
  : .
  KVM: arm64: Remove size-order align in the nVHE hyp private VA range
  KVM: arm64: Remove unused declarations
  KVM: arm64: Remove redundant kvm_set_pfn_accessed() from user_mem_abort()
  KVM: arm64: Drop HCR_VIRT_EXCP_MASK
  KVM: arm64: Use the known cpu id instead of smp_processor_id()

Signed-off-by: Marc Zyngier <maz@kernel.org>
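For context on the smp_processor_id() item above: kvm_arch_vcpu_load() is already handed the id of the CPU it is being loaded on, so re-querying it via smp_processor_id() is redundant. Below is a minimal, self-contained userspace sketch of that pattern, not the kernel patch itself; the struct vcpu type and vcpu_load() helper are hypothetical stand-ins for the kernel's kvm_vcpu and kvm_arch_vcpu_load().

#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct kvm_vcpu. */
struct vcpu {
	int cpu;		/* CPU the vCPU was last loaded on */
};

/* The load callback is already told which CPU it runs on... */
static void vcpu_load(struct vcpu *v, int cpu)
{
	/*
	 * ...so record the parameter instead of re-deriving it the way
	 * smp_processor_id() would in the kernel.
	 */
	v->cpu = cpu;
}

int main(void)
{
	struct vcpu v = { .cpu = -1 };

	vcpu_load(&v, 3);	/* pretend the scheduler picked CPU 3 */
	printf("vcpu loaded on cpu %d\n", v.cpu);
	return 0;
}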
Diffstat (limited to 'arch/arm64/include/asm/kvm_host.h')
-rw-r--r--  arch/arm64/include/asm/kvm_host.h  6
1 file changed, 0 insertions, 6 deletions
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index dfc26992d14f..af06ccb7ee34 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -974,8 +974,6 @@ void kvm_arm_resume_guest(struct kvm *kvm);
 #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
 #endif /* __KVM_NVHE_HYPERVISOR__ */
 
-void force_vm_exit(const cpumask_t *mask);
-
 int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
 void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
 
@@ -1057,8 +1055,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
 	return cpus_have_const_cap(ARM64_SPECTRE_V3A);
 }
 
-void kvm_arm_vcpu_ptrauth_trap(struct kvm_vcpu *vcpu);
-
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
 static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
 
@@ -1130,8 +1126,6 @@ static inline bool kvm_vm_is_protected(struct kvm *kvm)
 	return false;
 }
 
-void kvm_init_protected_traps(struct kvm_vcpu *vcpu);
-
 int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
 bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
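As a side note on the "remove prototypes without implementations" item: a declaration with no definition anywhere in the tree compiles cleanly and only breaks the build at link time if something actually calls it, which is how stale prototypes like force_vm_exit() can linger. A minimal userspace illustration follows; cpumask_t here is a hypothetical stand-in since the kernel header is not available outside the tree.

/* Hypothetical stand-in for the kernel's cpumask_t. */
typedef struct { unsigned long bits[4]; } cpumask_t;

/* Declared but never defined anywhere: still compiles. */
void force_vm_exit(const cpumask_t *mask);

int main(void)
{
	/*
	 * Uncommenting the call below would make the link fail with an
	 * undefined reference, which is the only point at which the dead
	 * declaration would ever be noticed.
	 */
	/* force_vm_exit(0); */
	return 0;
}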