Diffstat (limited to 'arch/arm64/include/asm/kvm_host.h')
-rw-r--r--   arch/arm64/include/asm/kvm_host.h   160
1 file changed, 138 insertions(+), 22 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d919557af5e5..d941abc6b5ee 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -39,19 +39,21 @@
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 7
+#define KVM_VCPU_MAX_FEATURES 9
#define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
#define KVM_REQ_SLEEP \
KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
-#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
-#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
-#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
-#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
-#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
-#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
-#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
-#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
+#define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
+#define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
+#define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
+#define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
+#define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
+#define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
+#define KVM_REQ_GUEST_HYP_IRQ_PENDING KVM_ARCH_REQ(9)
+#define KVM_REQ_MAP_L1_VNCR_EL2 KVM_ARCH_REQ(10)
#define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
KVM_DIRTY_LOG_INITIALLY_SET)
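
The first hunk bumps the vCPU feature count from 7 to 9, which widens the derived valid-features mask, and claims request numbers 9 and 10 for the new nested-virt requests. A standalone sketch of the mask arithmetic (the local BIT() stand-in replaces the kernel's definition):

/* Illustrative arithmetic only: with the count raised to 9, the
 * derived mask covers feature bits 0..8. */
#define BIT(n)			(1UL << (n))
#define KVM_VCPU_MAX_FEATURES	9
#define KVM_VCPU_VALID_FEATURES	(BIT(KVM_VCPU_MAX_FEATURES) - 1)

_Static_assert(KVM_VCPU_VALID_FEATURES == 0x1ff,
	       "bits 0..8 valid once the count is 9");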
@@ -86,6 +88,9 @@ struct kvm_hyp_memcache {
phys_addr_t head;
unsigned long nr_pages;
struct pkvm_mapping *mapping; /* only used from EL1 */
+
+#define HYP_MEMCACHE_ACCOUNT_STAGE2 BIT(1)
+ unsigned long flags;
};
static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
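
The memcache gains a flags word alongside its page list. A hedged sketch of how a caller might tag a cache for stage-2 accounting (the helper name is invented for illustration):

/* Hypothetical helper: mark a memcache so its pages are charged
 * against stage-2 page-table accounting. */
static inline void hyp_memcache_mark_stage2(struct kvm_hyp_memcache *mc)
{
	mc->flags |= HYP_MEMCACHE_ACCOUNT_STAGE2;
}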
@@ -237,7 +242,8 @@ struct kvm_arch_memory_slot {
struct kvm_smccc_features {
unsigned long std_bmap;
unsigned long std_hyp_bmap;
- unsigned long vendor_hyp_bmap;
+ unsigned long vendor_hyp_bmap; /* Function numbers 0-63 */
+ unsigned long vendor_hyp_bmap_2; /* Function numbers 64-127 */
};
typedef unsigned int pkvm_handle_t;
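
Vendor hypercall discovery now spans two 64-bit words, one per block of 64 function numbers. A hedged lookup sketch (the helper is invented; the word/bit split follows the comments above):

/* Hypothetical helper: test vendor-hyp function 'n' (0-127), picking
 * the word from the function number and the bit from its low six bits. */
static inline bool vendor_hyp_fn_enabled(const struct kvm_smccc_features *f,
					 unsigned int n)
{
	unsigned long bmap = (n < 64) ? f->vendor_hyp_bmap
				      : f->vendor_hyp_bmap_2;

	return bmap & BIT(n & 63);
}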
@@ -245,6 +251,7 @@ typedef unsigned int pkvm_handle_t;
struct kvm_protected_vm {
pkvm_handle_t handle;
struct kvm_hyp_memcache teardown_mc;
+ struct kvm_hyp_memcache stage2_teardown_mc;
bool enabled;
};
@@ -267,11 +274,17 @@ struct kvm_sysreg_masks;
enum fgt_group_id {
__NO_FGT_GROUP__,
- HFGxTR_GROUP,
+ HFGRTR_GROUP,
+ HFGWTR_GROUP = HFGRTR_GROUP,
HDFGRTR_GROUP,
HDFGWTR_GROUP = HDFGRTR_GROUP,
HFGITR_GROUP,
HAFGRTR_GROUP,
+ HFGRTR2_GROUP,
+ HFGWTR2_GROUP = HFGRTR2_GROUP,
+ HDFGRTR2_GROUP,
+ HDFGWTR2_GROUP = HDFGRTR2_GROUP,
+ HFGITR2_GROUP,
/* Must be last */
__NR_FGT_GROUP_IDS__
@@ -334,6 +347,8 @@ struct kvm_arch {
#define KVM_ARCH_FLAG_FGU_INITIALIZED 8
/* SVE exposed to guest */
#define KVM_ARCH_FLAG_GUEST_HAS_SVE 9
+ /* MIDR_EL1, REVIDR_EL1, and AIDR_EL1 are writable from userspace */
+#define KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS 10
unsigned long flags;
/* VM-wide vCPU feature set */
@@ -351,8 +366,8 @@ struct kvm_arch {
cpumask_var_t supported_cpus;
- /* PMCR_EL0.N value for the guest */
- u8 pmcr_n;
+ /* Maximum number of counters for the guest */
+ u8 nr_pmu_counters;
/* Iterator for idreg debugfs */
u8 idreg_debugfs_iter;
@@ -373,11 +388,17 @@ struct kvm_arch {
#define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
u64 id_regs[KVM_ARM_ID_REG_NUM];
+ u64 midr_el1;
+ u64 revidr_el1;
+ u64 aidr_el1;
u64 ctr_el0;
/* Masks for VNCR-backed and general EL2 sysregs */
struct kvm_sysreg_masks *sysreg_masks;
+ /* Count the number of VNCR_EL2 currently mapped */
+ atomic_t vncr_map_count;
+
/*
* For an untrusted host VM, 'pkvm.handle' is used to lookup
* the associated pKVM instance in the hypervisor.
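
vncr_map_count is atomic so the map and unmap paths can balance it without serialising on a VM-wide lock. A minimal sketch of the expected pairing (both function names are invented):

/* Illustrative pairing only: bump on map, drop on unmap, and complain
 * if the counter ever goes negative. */
static void vncr_mapped(struct kvm_arch *arch)
{
	atomic_inc(&arch->vncr_map_count);
}

static void vncr_unmapped(struct kvm_arch *arch)
{
	WARN_ON(atomic_dec_return(&arch->vncr_map_count) < 0);
}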
@@ -550,6 +571,13 @@ enum vcpu_sysreg {
VNCR(HDFGRTR_EL2),
VNCR(HDFGWTR_EL2),
VNCR(HAFGRTR_EL2),
+ VNCR(HFGRTR2_EL2),
+ VNCR(HFGWTR2_EL2),
+ VNCR(HFGITR2_EL2),
+ VNCR(HDFGRTR2_EL2),
+ VNCR(HDFGWTR2_EL2),
+
+ VNCR(VNCR_EL2),
VNCR(CNTVOFF_EL2),
VNCR(CNTV_CVAL_EL0),
@@ -557,7 +585,33 @@ enum vcpu_sysreg {
VNCR(CNTP_CVAL_EL0),
VNCR(CNTP_CTL_EL0),
+ VNCR(ICH_LR0_EL2),
+ VNCR(ICH_LR1_EL2),
+ VNCR(ICH_LR2_EL2),
+ VNCR(ICH_LR3_EL2),
+ VNCR(ICH_LR4_EL2),
+ VNCR(ICH_LR5_EL2),
+ VNCR(ICH_LR6_EL2),
+ VNCR(ICH_LR7_EL2),
+ VNCR(ICH_LR8_EL2),
+ VNCR(ICH_LR9_EL2),
+ VNCR(ICH_LR10_EL2),
+ VNCR(ICH_LR11_EL2),
+ VNCR(ICH_LR12_EL2),
+ VNCR(ICH_LR13_EL2),
+ VNCR(ICH_LR14_EL2),
+ VNCR(ICH_LR15_EL2),
+
+ VNCR(ICH_AP0R0_EL2),
+ VNCR(ICH_AP0R1_EL2),
+ VNCR(ICH_AP0R2_EL2),
+ VNCR(ICH_AP0R3_EL2),
+ VNCR(ICH_AP1R0_EL2),
+ VNCR(ICH_AP1R1_EL2),
+ VNCR(ICH_AP1R2_EL2),
+ VNCR(ICH_AP1R3_EL2),
VNCR(ICH_HCR_EL2),
+ VNCR(ICH_VMCR_EL2),
NR_SYS_REGS /* Nothing after this line! */
};
@@ -569,6 +623,37 @@ struct kvm_sysreg_masks {
} mask[NR_SYS_REGS - __SANITISED_REG_START__];
};
+struct fgt_masks {
+ const char *str;
+ u64 mask;
+ u64 nmask;
+ u64 res0;
+};
+
+extern struct fgt_masks hfgrtr_masks;
+extern struct fgt_masks hfgwtr_masks;
+extern struct fgt_masks hfgitr_masks;
+extern struct fgt_masks hdfgrtr_masks;
+extern struct fgt_masks hdfgwtr_masks;
+extern struct fgt_masks hafgrtr_masks;
+extern struct fgt_masks hfgrtr2_masks;
+extern struct fgt_masks hfgwtr2_masks;
+extern struct fgt_masks hfgitr2_masks;
+extern struct fgt_masks hdfgrtr2_masks;
+extern struct fgt_masks hdfgwtr2_masks;
+
+extern struct fgt_masks kvm_nvhe_sym(hfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgitr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgwtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hafgrtr_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgrtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgwtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hfgitr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgrtr2_masks);
+extern struct fgt_masks kvm_nvhe_sym(hdfgwtr2_masks);
+
struct kvm_cpu_context {
struct user_pt_regs regs; /* sp = sp_el0 */
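
Each fgt_masks instance plausibly describes one fine-grained-trap register: positive-polarity trap bits in mask, negative-polarity bits in nmask, and reserved bits in res0, so the three fields should partition the 64-bit register. A hedged consistency check in that spirit (presumably close to what check_feature_map(), declared near the end of the header, verifies):

/* Illustrative invariant: mask, nmask and res0 are disjoint and
 * together cover every bit of the register. */
static bool fgt_masks_consistent(const struct fgt_masks *m)
{
	return !(m->mask & m->nmask) &&
	       !((m->mask | m->nmask) & m->res0) &&
	       (m->mask | m->nmask | m->res0) == ~0ULL;
}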
@@ -617,6 +702,8 @@ struct kvm_host_data {
#define KVM_HOST_DATA_FLAG_HAS_TRBE 1
#define KVM_HOST_DATA_FLAG_TRBE_ENABLED 4
#define KVM_HOST_DATA_FLAG_EL1_TRACING_CONFIGURED 5
+#define KVM_HOST_DATA_FLAG_VCPU_IN_HYP_CONTEXT 6
+#define KVM_HOST_DATA_FLAG_L1_VNCR_MAPPED 7
unsigned long flags;
struct kvm_cpu_context host_ctxt;
@@ -693,6 +780,8 @@ struct vcpu_reset_state {
bool reset;
};
+struct vncr_tlb;
+
struct kvm_vcpu_arch {
struct kvm_cpu_context ctxt;
@@ -787,6 +876,9 @@ struct kvm_vcpu_arch {
/* Per-vcpu CCSIDR override or NULL */
u32 *ccsidr;
+
+ /* Per-vcpu TLB for VNCR_EL2 -- NULL when !NV */
+ struct vncr_tlb *vncr_tlb;
};
/*
@@ -869,6 +961,8 @@ struct kvm_vcpu_arch {
#define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(0))
/* SVE config completed */
#define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
+/* pKVM VCPU setup completed */
+#define VCPU_PKVM_FINALIZED __vcpu_single_flag(cflags, BIT(2))
/* Exception pending */
#define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
@@ -919,6 +1013,8 @@ struct kvm_vcpu_arch {
#define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(5))
/* WFI instruction trapped */
#define IN_WFI __vcpu_single_flag(sflags, BIT(6))
+/* KVM is currently emulating a nested ERET */
+#define IN_NESTED_ERET __vcpu_single_flag(sflags, BIT(7))
/* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
@@ -930,20 +1026,22 @@ struct kvm_vcpu_arch {
#define vcpu_sve_zcr_elx(vcpu) \
(unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
-#define vcpu_sve_state_size(vcpu) ({ \
+#define sve_state_size_from_vl(sve_max_vl) ({ \
size_t __size_ret; \
- unsigned int __vcpu_vq; \
+ unsigned int __vq; \
\
- if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
+ if (WARN_ON(!sve_vl_valid(sve_max_vl))) { \
__size_ret = 0; \
} else { \
- __vcpu_vq = vcpu_sve_max_vq(vcpu); \
- __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
+ __vq = sve_vq_from_vl(sve_max_vl); \
+ __size_ret = SVE_SIG_REGS_SIZE(__vq); \
} \
\
__size_ret; \
})
+#define vcpu_sve_state_size(vcpu) sve_state_size_from_vl((vcpu)->arch.sve_max_vl)
+
#define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
KVM_GUESTDBG_USE_SW_BP | \
KVM_GUESTDBG_USE_HW | \
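
Splitting sve_state_size_from_vl() out of the vcpu-based macro lets callers that only hold a vector length compute the buffer size. A hedged usage sketch (the allocation context is invented):

/* Illustrative only: size an SVE state buffer from a maximum vector
 * length, with no vcpu in hand; returns NULL on an invalid VL. */
static void *alloc_sve_state(unsigned int max_vl)
{
	size_t sz = sve_state_size_from_vl(max_vl);

	return sz ? kzalloc(sz, GFP_KERNEL_ACCOUNT) : NULL;
}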
@@ -1334,8 +1432,6 @@ static inline bool kvm_system_needs_idmapped_vectors(void)
return cpus_have_final_cap(ARM64_SPECTRE_V3A);
}
-static inline void kvm_arch_sync_events(struct kvm *kvm) {}
-
void kvm_init_host_debug_data(void);
void kvm_vcpu_load_debug(struct kvm_vcpu *vcpu);
void kvm_vcpu_put_debug(struct kvm_vcpu *vcpu);
@@ -1459,6 +1555,12 @@ static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
return &ka->id_regs[IDREG_IDX(reg)];
case SYS_CTR_EL0:
return &ka->ctr_el0;
+ case SYS_MIDR_EL1:
+ return &ka->midr_el1;
+ case SYS_REVIDR_EL1:
+ return &ka->revidr_el1;
+ case SYS_AIDR_EL1:
+ return &ka->aidr_el1;
default:
WARN_ON_ONCE(1);
return NULL;
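
The new cases route MIDR_EL1, REVIDR_EL1 and AIDR_EL1 to their per-VM storage, matching the writable-implementation-ID flag added earlier. A hedged accessor sketch (the wrapper is invented):

/* Illustrative wrapper: read a VM-wide ID register via the new
 * per-VM storage, treating unhandled registers as zero. */
static u64 vm_id_reg_or_zero(struct kvm_arch *ka, u32 reg)
{
	u64 *p = __vm_id_reg(ka, reg);

	return p ? *p : 0;
}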
@@ -1505,12 +1607,16 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \
kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))
-#define kvm_has_feat(kvm, id, fld, limit) \
+#define __kvm_has_feat(kvm, id, fld, limit) \
kvm_cmp_feat(kvm, id, fld, >=, limit)
-#define kvm_has_feat_enum(kvm, id, fld, val) \
+#define kvm_has_feat(kvm, ...) __kvm_has_feat(kvm, __VA_ARGS__)
+
+#define __kvm_has_feat_enum(kvm, id, fld, val) \
kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)
+#define kvm_has_feat_enum(kvm, ...) __kvm_has_feat_enum(kvm, __VA_ARGS__)
+
#define kvm_has_feat_range(kvm, id, fld, min, max) \
(kvm_cmp_feat(kvm, id, fld, >=, min) && \
kvm_cmp_feat(kvm, id, fld, <=, max))
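
Routing kvm_has_feat() and kvm_has_feat_enum() through __VA_ARGS__ buys one extra round of macro expansion, so a single macro that expands to the whole id/field/limit triplet can now be passed as the argument. A hedged illustration (NV_FEAT is invented; the underlying field is real):

/* Hypothetical: NV_FEAT expands to the full triplet before
 * __kvm_has_feat() consumes it. */
#define NV_FEAT		ID_AA64MMFR2_EL1, NV, IMP

static bool vm_has_nv(struct kvm *kvm)
{
	return kvm_has_feat(kvm, NV_FEAT);
}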
@@ -1543,4 +1649,14 @@ void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
#define kvm_has_s1poe(k) \
(kvm_has_feat((k), ID_AA64MMFR3_EL1, S1POE, IMP))
+static inline bool kvm_arch_has_irq_bypass(void)
+{
+ return true;
+}
+
+void compute_fgu(struct kvm *kvm, enum fgt_group_id fgt);
+void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *res1);
+void check_feature_map(void);
+
+
#endif /* __ARM64_KVM_HOST_H__ */