author     James Clark <james.clark@linaro.org>    2025-01-06 14:24:40 +0000
committer  Marc Zyngier <maz@kernel.org>           2025-01-12 12:50:11 +0000
commit     a665e3bc88081dd65642d83fc22a1abdb6a901bc (patch)
tree       af3275ecb9f2ea20b75ea809fefd2e534c4893c4 /arch/arm64/include/asm
parent     a2b579c41fe9c295804abd167751f9fdc73c7006 (diff)
KVM: arm64: coresight: Give TRBE enabled state to KVM
Currently in nVHE, KVM has to check whether TRBE is enabled on every
guest switch, even if it was never used. Because it is a debug feature
that is more likely to be unused than used, give KVM the TRBE buffer
status so the hyp can take a much simpler and faster do-nothing path.
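As an illustration, a minimal sketch of the fast path this enables in
the nVHE hyp; the function name is illustrative and the
host_data_test_flag() helper is an assumption based on the flag added
below, not code from this patch:

	/*
	 * Sketch only: early-out in the nVHE trace save path when the
	 * host never enabled a TRBE buffer on this CPU.
	 * host_data_test_flag() is assumed to test a
	 * KVM_HOST_DATA_FLAG_* bit in kvm_host_data::flags.
	 */
	static void __debug_save_trace(void)
	{
		/* Do-nothing path: no TRBE buffer enabled, nothing to drain */
		if (!host_data_test_flag(TRBE_ENABLED))
			return;

		/* Otherwise drain and disable the buffer before running
		 * the guest, as before. */
	}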
Protected mode now disables trace regardless of TRBE (because
trfcr_while_in_guest is always 0), which was not previously done.
However, it continues to flush whenever the buffer is enabled,
regardless of the filter status. This avoids the hypothetical case of a
host that had disabled the filter but not yet flushed, which would
arise if the flush were done only when the filter was enabled.
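A sketch of the switch-to-guest behaviour described above; apart from
trfcr_while_in_guest and the TRBE_ENABLED flag, the helper names here
are illustrative assumptions:

	/*
	 * Sketch only: in protected mode trfcr_while_in_guest stays 0,
	 * so EL1 tracing is disabled while the guest runs regardless of
	 * TRBE. The flush still keys off TRBE_ENABLED alone, covering a
	 * host that disabled the filter without flushing the buffer.
	 */
	static void __trace_switch_to_guest(void)
	{
		/* Install the guest filter value (0 in protected mode) */
		write_sysreg_el1(*host_data_ptr(trfcr_while_in_guest),
				 SYS_TRFCR);

		/* Flush whenever the buffer is enabled, independent of
		 * the filter status */
		if (host_data_test_flag(TRBE_ENABLED))
			tsb_csync();
	}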
Signed-off-by: James Clark <james.clark@linaro.org>
Link: https://lore.kernel.org/r/20250106142446.628923-6-james.clark@linaro.org
Signed-off-by: Marc Zyngier <maz@kernel.org>
Diffstat (limited to 'arch/arm64/include/asm')
-rw-r--r--    arch/arm64/include/asm/kvm_host.h    9
1 file changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index e7c740c99ee3..fec53d84e990 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -614,6 +614,8 @@ struct kvm_host_data {
 #define KVM_HOST_DATA_FLAG_HAS_TRBE			1
 #define KVM_HOST_DATA_FLAG_HOST_SVE_ENABLED		2
 #define KVM_HOST_DATA_FLAG_HOST_SME_ENABLED		3
+#define KVM_HOST_DATA_FLAG_TRBE_ENABLED			4
+#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED	5
 	unsigned long flags;
 
 	struct kvm_cpu_context host_ctxt;
@@ -659,6 +661,9 @@ struct kvm_host_data {
 		u64 mdcr_el2;
 	} host_debug_state;
 
+	/* Guest trace filter value */
+	u64 trfcr_while_in_guest;
+
 	/* Number of programmable event counters (PMCR_EL0.N) for this CPU */
 	unsigned int nr_event_counters;
 
@@ -1381,6 +1386,8 @@ static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
 void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
 void kvm_clr_pmu_events(u64 clr);
 bool kvm_set_pmuserenr(u64 val);
+void kvm_enable_trbe(void);
+void kvm_disable_trbe(void);
 #else
 static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
 static inline void kvm_clr_pmu_events(u64 clr) {}
@@ -1388,6 +1395,8 @@ static inline bool kvm_set_pmuserenr(u64 val)
 {
 	return false;
 }
+static inline void kvm_enable_trbe(void) {}
+static inline void kvm_disable_trbe(void) {}
 #endif
 
 void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
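On the driver side, the intended usage would presumably be to bracket
buffer enable/disable with the new hooks. The TRBE driver function
names below are assumptions for illustration; only kvm_enable_trbe()
and kvm_disable_trbe() come from this patch:

	/*
	 * Sketch only: the coresight TRBE driver tells KVM when a
	 * buffer is live so the hyp can take the do-nothing path
	 * otherwise.
	 */
	static void trbe_enable_hw(void)
	{
		kvm_enable_trbe();	/* sets KVM_HOST_DATA_FLAG_TRBE_ENABLED */
		/* ...program the buffer and set TRBLIMITR_EL1.E... */
	}

	static void trbe_disable_hw(void)
	{
		/* ...stop and drain the buffer... */
		kvm_disable_trbe();	/* clears the flag */
	}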