Diffstat (limited to 'arch/x86/include/asm')
-rw-r--r--	arch/x86/include/asm/acpi.h            |  4
-rw-r--r--	arch/x86/include/asm/ce4100.h          |  6
-rw-r--r--	arch/x86/include/asm/cpufeatures.h     |  3
-rw-r--r--	arch/x86/include/asm/fpu/types.h       | 49
-rw-r--r--	arch/x86/include/asm/fpu/xstate.h      |  9
-rw-r--r--	arch/x86/include/asm/init.h            |  2
-rw-r--r--	arch/x86/include/asm/intel_telemetry.h | 37
-rw-r--r--	arch/x86/include/asm/mshyperv.h        | 22
-rw-r--r--	arch/x86/include/asm/msr-index.h       |  6
-rw-r--r--	arch/x86/include/asm/pgtable_types.h   |  3
-rw-r--r--	arch/x86/include/asm/realmode.h        |  2
-rw-r--r--	arch/x86/include/asm/sev.h             | 14
-rw-r--r--	arch/x86/include/asm/smp.h             | 23
-rw-r--r--	arch/x86/include/asm/special_insns.h   | 29
14 files changed, 124 insertions, 85 deletions
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 5ab1a4598d00..a03aa6f999d1 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -158,13 +158,13 @@ static inline bool acpi_has_cpu_in_madt(void)
 }
 
 #define ACPI_HAVE_ARCH_SET_ROOT_POINTER
-static inline void acpi_arch_set_root_pointer(u64 addr)
+static __always_inline void acpi_arch_set_root_pointer(u64 addr)
 {
 	x86_init.acpi.set_root_pointer(addr);
 }
 
 #define ACPI_HAVE_ARCH_GET_ROOT_POINTER
-static inline u64 acpi_arch_get_root_pointer(void)
+static __always_inline u64 acpi_arch_get_root_pointer(void)
 {
 	return x86_init.acpi.get_root_pointer();
 }
diff --git a/arch/x86/include/asm/ce4100.h b/arch/x86/include/asm/ce4100.h
index 2930f560d7f3..e1f965bb1e31 100644
--- a/arch/x86/include/asm/ce4100.h
+++ b/arch/x86/include/asm/ce4100.h
@@ -4,4 +4,10 @@
 
 int ce4100_pci_init(void);
 
+#ifdef CONFIG_SERIAL_8250
+void __init sdv_serial_fixup(void);
+#else
+static inline void sdv_serial_fixup(void) {};
+#endif
+
 #endif
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 286d509f9363..602957dd2609 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -458,9 +458,12 @@
 #define X86_FEATURE_LFENCE_RDTSC	(20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
 #define X86_FEATURE_VERW_CLEAR		(20*32+ 5) /* The memory form of VERW mitigates TSA */
 #define X86_FEATURE_NULL_SEL_CLR_BASE	(20*32+ 6) /* Null Selector Clears Base */
+
 #define X86_FEATURE_AUTOIBRS		(20*32+ 8) /* Automatic IBRS */
 #define X86_FEATURE_NO_SMM_CTL_MSR	(20*32+ 9) /* SMM_CTL MSR is not present */
+#define X86_FEATURE_GP_ON_USER_CPUID	(20*32+17) /* User CPUID faulting */
+
 #define X86_FEATURE_PREFETCHI		(20*32+20) /* Prefetch Data/Instruction to Cache Level */
 #define X86_FEATURE_SBPB		(20*32+27) /* Selective Branch Prediction Barrier */
 #define X86_FEATURE_IBPB_BRTYPE		(20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 1c94121acd3d..93e99d2583d6 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -118,7 +118,7 @@ enum xfeature {
 	XFEATURE_PKRU,
 	XFEATURE_PASID,
 	XFEATURE_CET_USER,
-	XFEATURE_CET_KERNEL_UNUSED,
+	XFEATURE_CET_KERNEL,
 	XFEATURE_RSRVD_COMP_13,
 	XFEATURE_RSRVD_COMP_14,
 	XFEATURE_LBR,
@@ -142,7 +142,7 @@ enum xfeature {
 #define XFEATURE_MASK_PKRU		(1 << XFEATURE_PKRU)
 #define XFEATURE_MASK_PASID		(1 << XFEATURE_PASID)
 #define XFEATURE_MASK_CET_USER		(1 << XFEATURE_CET_USER)
-#define XFEATURE_MASK_CET_KERNEL	(1 << XFEATURE_CET_KERNEL_UNUSED)
+#define XFEATURE_MASK_CET_KERNEL	(1 << XFEATURE_CET_KERNEL)
 #define XFEATURE_MASK_LBR		(1 << XFEATURE_LBR)
 #define XFEATURE_MASK_XTILE_CFG		(1 << XFEATURE_XTILE_CFG)
 #define XFEATURE_MASK_XTILE_DATA	(1 << XFEATURE_XTILE_DATA)
@@ -269,6 +269,16 @@ struct cet_user_state {
 };
 
 /*
+ * State component 12 is Control-flow Enforcement supervisor states.
+ * This state includes SSP pointers for privilege levels 0 through 2.
+ */
+struct cet_supervisor_state {
+	u64 pl0_ssp;
+	u64 pl1_ssp;
+	u64 pl2_ssp;
+} __packed;
+
+/*
  * State component 15: Architectural LBR configuration state.
  * The size of Arch LBR state depends on the number of LBRs (lbr_depth).
  */
@@ -552,6 +562,31 @@ struct fpu_guest {
 };
 
 /*
+ * FPU state configuration data for fpu_guest.
+ * Initialized at boot time. Read only after init.
+ */
+struct vcpu_fpu_config {
+	/*
+	 * @size:
+	 *
+	 * The default size of the register state buffer in guest FPUs.
+	 * Includes all supported features except independent managed
+	 * features and features which have to be requested by user space
+	 * before usage.
+	 */
+	unsigned int size;
+
+	/*
+	 * @features:
+	 *
+	 * The default supported features bitmap in guest FPUs. Does not
+	 * include independent managed features and features which have to
+	 * be requested by user space before usage.
+	 */
+	u64 features;
+};
+
+/*
  * FPU state configuration data. Initialized at boot time. Read only after init.
  */
 struct fpu_state_config {
@@ -567,8 +602,9 @@ struct fpu_state_config {
 	 * @default_size:
 	 *
 	 * The default size of the register state buffer. Includes all
-	 * supported features except independent managed features and
-	 * features which have to be requested by user space before usage.
+	 * supported features except independent managed features,
+	 * guest-only features and features which have to be requested by
+	 * user space before usage.
 	 */
 	unsigned int default_size;
 
@@ -584,8 +620,8 @@ struct fpu_state_config {
 	 * @default_features:
 	 *
 	 * The default supported features bitmap. Does not include
-	 * independent managed features and features which have to
-	 * be requested by user space before usage.
+	 * independent managed features, guest-only features and features
+	 * which have to be requested by user space before usage.
 	 */
 	u64 default_features;
 	/*
@@ -606,5 +642,6 @@ struct fpu_state_config {
 
 /* FPU state configuration information */
 extern struct fpu_state_config fpu_kernel_cfg, fpu_user_cfg;
+extern struct vcpu_fpu_config guest_default_cfg;
 
 #endif /* _ASM_X86_FPU_TYPES_H */
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
index b308a76afbb7..7a7dc9d56027 100644
--- a/arch/x86/include/asm/fpu/xstate.h
+++ b/arch/x86/include/asm/fpu/xstate.h
@@ -46,9 +46,13 @@
 /* Features which are dynamically enabled for a process on request */
 #define XFEATURE_MASK_USER_DYNAMIC	XFEATURE_MASK_XTILE_DATA
 
+/* Supervisor features which are enabled only in guest FPUs */
+#define XFEATURE_MASK_GUEST_SUPERVISOR	XFEATURE_MASK_CET_KERNEL
+
 /* All currently supported supervisor features */
 #define XFEATURE_MASK_SUPERVISOR_SUPPORTED (XFEATURE_MASK_PASID | \
-					    XFEATURE_MASK_CET_USER)
+					    XFEATURE_MASK_CET_USER | \
+					    XFEATURE_MASK_GUEST_SUPERVISOR)
 
 /*
  * A supervisor state component may not always contain valuable information,
@@ -75,8 +79,7 @@
  * Unsupported supervisor features. When a supervisor feature in this mask is
  * supported in the future, move it to the supported supervisor feature mask.
  */
-#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT | \
-					      XFEATURE_MASK_CET_KERNEL)
+#define XFEATURE_MASK_SUPERVISOR_UNSUPPORTED (XFEATURE_MASK_PT)
 
 /* All supervisor states including supported and unsupported states. */
 #define XFEATURE_MASK_SUPERVISOR_ALL (XFEATURE_MASK_SUPERVISOR_SUPPORTED | \
diff --git a/arch/x86/include/asm/init.h b/arch/x86/include/asm/init.h
index 8b1b1abcef15..5a68e9db6518 100644
--- a/arch/x86/include/asm/init.h
+++ b/arch/x86/include/asm/init.h
@@ -5,7 +5,7 @@
 #if defined(CONFIG_CC_IS_CLANG) && CONFIG_CLANG_VERSION < 170000
 #define __head	__section(".head.text") __no_sanitize_undefined __no_stack_protector
 #else
-#define __head	__section(".head.text") __no_sanitize_undefined
+#define __head	__section(".head.text") __no_sanitize_undefined __no_kstack_erase
 #endif
 
 struct x86_mapping_info {
diff --git a/arch/x86/include/asm/intel_telemetry.h b/arch/x86/include/asm/intel_telemetry.h
index 43b7657febca..944637a4e6de 100644
--- a/arch/x86/include/asm/intel_telemetry.h
+++ b/arch/x86/include/asm/intel_telemetry.h
@@ -59,18 +59,6 @@ struct telemetry_plt_config {
 };
 
 struct telemetry_core_ops {
-	int (*get_sampling_period)(u8 *pss_min_period, u8 *pss_max_period,
-				   u8 *ioss_min_period, u8 *ioss_max_period);
-
-	int (*get_eventconfig)(struct telemetry_evtconfig *pss_evtconfig,
-			       struct telemetry_evtconfig *ioss_evtconfig,
-			       int pss_len, int ioss_len);
-
-	int (*update_events)(struct telemetry_evtconfig pss_evtconfig,
-			     struct telemetry_evtconfig ioss_evtconfig);
-
-	int (*set_sampling_period)(u8 pss_period, u8 ioss_period);
-
 	int (*get_trace_verbosity)(enum telemetry_unit telem_unit,
 				   u32 *verbosity);
 
@@ -84,11 +72,6 @@ struct telemetry_core_ops {
 	int (*read_eventlog)(enum telemetry_unit telem_unit,
 			     struct telemetry_evtlog *evtlog,
 			     int len, int log_all_evts);
-
-	int (*add_events)(u8 num_pss_evts, u8 num_ioss_evts,
-			  u32 *pss_evtmap, u32 *ioss_evtmap);
-
-	int (*reset_events)(void);
 };
 
 int telemetry_set_pltdata(const struct telemetry_core_ops *ops,
@@ -101,35 +84,15 @@ struct telemetry_plt_config *telemetry_get_pltdata(void);
 int telemetry_get_evtname(enum telemetry_unit telem_unit, const char **name,
 			  int len);
 
-int telemetry_update_events(struct telemetry_evtconfig pss_evtconfig,
-			    struct telemetry_evtconfig ioss_evtconfig);
-
-int telemetry_add_events(u8 num_pss_evts, u8 num_ioss_evts,
-			 u32 *pss_evtmap, u32 *ioss_evtmap);
-
-int telemetry_reset_events(void);
-
-int telemetry_get_eventconfig(struct telemetry_evtconfig *pss_config,
-			      struct telemetry_evtconfig *ioss_config,
-			      int pss_len, int ioss_len);
-
 int telemetry_read_events(enum telemetry_unit telem_unit,
 			  struct telemetry_evtlog *evtlog, int len);
 
-int telemetry_raw_read_events(enum telemetry_unit telem_unit,
-			      struct telemetry_evtlog *evtlog, int len);
-
 int telemetry_read_eventlog(enum telemetry_unit telem_unit,
 			    struct telemetry_evtlog *evtlog, int len);
 
 int telemetry_raw_read_eventlog(enum telemetry_unit telem_unit,
 				struct telemetry_evtlog *evtlog, int len);
 
-int telemetry_get_sampling_period(u8 *pss_min_period, u8 *pss_max_period,
-				  u8 *ioss_min_period, u8 *ioss_max_period);
-
-int telemetry_set_sampling_period(u8 pss_period, u8 ioss_period);
-
 int telemetry_set_trace_verbosity(enum telemetry_unit telem_unit,
 				  u32 verbosity);
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index e1752ba47e67..abc4659f5809 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -112,12 +112,6 @@ static inline u64 hv_do_hypercall(u64 control, void *input, void *output)
 	return hv_status;
 }
 
-/* Hypercall to the L0 hypervisor */
-static inline u64 hv_do_nested_hypercall(u64 control, void *input, void *output)
-{
-	return hv_do_hypercall(control | HV_HYPERCALL_NESTED, input, output);
-}
-
 /* Fast hypercall with 8 bytes of input and no output */
 static inline u64 _hv_do_fast_hypercall8(u64 control, u64 input1)
 {
@@ -165,13 +159,6 @@ static inline u64 hv_do_fast_hypercall8(u16 code, u64 input1)
 	return _hv_do_fast_hypercall8(control, input1);
 }
 
-static inline u64 hv_do_fast_nested_hypercall8(u16 code, u64 input1)
-{
-	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
-
-	return _hv_do_fast_hypercall8(control, input1);
-}
-
 /* Fast hypercall with 16 bytes of input */
 static inline u64 _hv_do_fast_hypercall16(u64 control, u64 input1, u64 input2)
 {
@@ -223,13 +210,6 @@ static inline u64 hv_do_fast_hypercall16(u16 code, u64 input1, u64 input2)
 	return _hv_do_fast_hypercall16(control, input1, input2);
 }
 
-static inline u64 hv_do_fast_nested_hypercall16(u16 code, u64 input1, u64 input2)
-{
-	u64 control = (u64)code | HV_HYPERCALL_FAST_BIT | HV_HYPERCALL_NESTED;
-
-	return _hv_do_fast_hypercall16(control, input1, input2);
-}
-
 extern struct hv_vp_assist_page **hv_vp_assist_page;
 
 static inline struct hv_vp_assist_page *hv_get_vp_assist_page(unsigned int cpu)
@@ -262,6 +242,8 @@ static inline void hv_apic_init(void) {}
 
 struct irq_domain *hv_create_pci_msi_domain(void);
 
+int hv_map_msi_interrupt(struct irq_data *data,
+			 struct hv_interrupt_entry *out_entry);
 int hv_map_ioapic_interrupt(int ioapic_id, bool level, int vcpu, int vector,
 		struct hv_interrupt_entry *entry);
 int hv_unmap_ioapic_interrupt(int ioapic_id, struct hv_interrupt_entry *entry);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 5cfb5d74dd5f..7490bb5c0776 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -733,6 +733,11 @@
 #define MSR_AMD64_PERF_CNTR_GLOBAL_CTL		0xc0000301
 #define MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR	0xc0000302
 
+/* AMD Hardware Feedback Support MSRs */
+#define MSR_AMD_WORKLOAD_CLASS_CONFIG	0xc0000500
+#define MSR_AMD_WORKLOAD_CLASS_ID	0xc0000501
+#define MSR_AMD_WORKLOAD_HRST		0xc0000502
+
 /* AMD Last Branch Record MSRs */
 #define MSR_AMD64_LBR_SELECT			0xc000010e
 
@@ -831,6 +836,7 @@
 #define MSR_K7_HWCR_SMMLOCK		BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
 #define MSR_K7_HWCR_IRPERF_EN_BIT	30
 #define MSR_K7_HWCR_IRPERF_EN		BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
+#define MSR_K7_HWCR_CPUID_USER_DIS_BIT	35
 #define MSR_K7_FID_VID_CTL		0xc0010041
 #define MSR_K7_FID_VID_STATUS		0xc0010042
 #define MSR_K7_HWCR_CPB_DIS_BIT		25
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index b74ec5c3643b..a5731fb1e9dd 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -214,9 +214,6 @@ enum page_cache_mode {
 #define PAGE_READONLY		__pg(__PP|   0|_USR|___A|__NX|   0|   0|   0)
 #define PAGE_READONLY_EXEC	__pg(__PP|   0|_USR|___A|   0|   0|   0|   0)
 
-#define __PAGE_KERNEL		(__PP|__RW|   0|___A|__NX|___D|   0|___G)
-#define __PAGE_KERNEL_EXEC	(__PP|__RW|   0|___A|   0|___D|   0|___G)
-
 /*
  * Page tables needs to have Write=1 in order for any lower PTEs to be
  * writable. This includes shadow stack memory (Write=0, Dirty=1)
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
index f607081a022a..e406a1e92c63 100644
--- a/arch/x86/include/asm/realmode.h
+++ b/arch/x86/include/asm/realmode.h
@@ -78,7 +78,7 @@ extern unsigned char secondary_startup_64[];
 extern unsigned char secondary_startup_64_no_verify[];
 #endif
 
-static inline size_t real_mode_size_needed(void)
+static __always_inline size_t real_mode_size_needed(void)
 {
 	if (real_mode_header)
 		return 0;	/* already allocated. */
diff --git a/arch/x86/include/asm/sev.h b/arch/x86/include/asm/sev.h
index a631f7d7c0c0..89075ff19afa 100644
--- a/arch/x86/include/asm/sev.h
+++ b/arch/x86/include/asm/sev.h
@@ -243,6 +243,7 @@ struct snp_guest_req {
 	size_t resp_sz;
 
 	u64 exit_code;
+	u64 exitinfo2;
 	unsigned int vmpck_id;
 	u8 msg_version;
 	u8 msg_type;
@@ -460,7 +461,7 @@ static __always_inline void sev_es_nmi_complete(void)
 	    cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
 		__sev_es_nmi_complete();
 }
-extern int __init sev_es_efi_map_ghcbs(pgd_t *pgd);
+extern int __init sev_es_efi_map_ghcbs_cas(pgd_t *pgd);
 extern void sev_enable(struct boot_params *bp);
 
 /*
@@ -501,8 +502,6 @@ static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate)
 	return rc;
 }
 
-struct snp_guest_request_ioctl;
-
 void setup_ghcb(void);
 void early_snp_set_memory_private(unsigned long vaddr,
 				  unsigned long paddr, unsigned long npages);
@@ -528,8 +527,7 @@ void snp_kexec_begin(void);
 int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id);
 struct snp_msg_desc *snp_msg_alloc(void);
 void snp_msg_free(struct snp_msg_desc *mdesc);
-int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
-			   struct snp_guest_request_ioctl *rio);
+int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req);
 
 int snp_svsm_vtpm_send_command(u8 *buffer);
 
@@ -571,7 +569,7 @@ static inline void sev_es_ist_enter(struct pt_regs *regs) { }
 static inline void sev_es_ist_exit(void) { }
 static inline int sev_es_setup_ap_jump_table(struct real_mode_header *rmh) { return 0; }
 static inline void sev_es_nmi_complete(void) { }
-static inline int sev_es_efi_map_ghcbs(pgd_t *pgd) { return 0; }
+static inline int sev_es_efi_map_ghcbs_cas(pgd_t *pgd) { return 0; }
 static inline void sev_enable(struct boot_params *bp) { }
 static inline int pvalidate(unsigned long vaddr, bool rmp_psize, bool validate) { return 0; }
 static inline int rmpadjust(unsigned long vaddr, bool rmp_psize, unsigned long attrs) { return 0; }
@@ -602,8 +600,8 @@ static inline void snp_kexec_begin(void) { }
 static inline int snp_msg_init(struct snp_msg_desc *mdesc, int vmpck_id) { return -1; }
 static inline struct snp_msg_desc *snp_msg_alloc(void) { return NULL; }
 static inline void snp_msg_free(struct snp_msg_desc *mdesc) { }
-static inline int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
-					 struct snp_guest_request_ioctl *rio) { return -ENODEV; }
+static inline int snp_send_guest_request(struct snp_msg_desc *mdesc,
+					 struct snp_guest_req *req) { return -ENODEV; }
 static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
 static inline void __init snp_secure_tsc_prepare(void) { }
 static inline void __init snp_secure_tsc_init(void) { }
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 0c1c68039d6f..22bfebe6776d 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -112,7 +112,10 @@ void __noreturn hlt_play_dead(void);
 void native_play_dead(void);
 void play_dead_common(void);
 void wbinvd_on_cpu(int cpu);
-int wbinvd_on_all_cpus(void);
+void wbinvd_on_all_cpus(void);
+void wbinvd_on_cpus_mask(struct cpumask *cpus);
+void wbnoinvd_on_all_cpus(void);
+void wbnoinvd_on_cpus_mask(struct cpumask *cpus);
 
 void smp_kick_mwait_play_dead(void);
 void __noreturn mwait_play_dead(unsigned int eax_hint);
@@ -148,10 +151,24 @@ static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
 
 #else /* !CONFIG_SMP */
 #define wbinvd_on_cpu(cpu)	wbinvd()
-static inline int wbinvd_on_all_cpus(void)
+static inline void wbinvd_on_all_cpus(void)
 {
 	wbinvd();
-	return 0;
+}
+
+static inline void wbinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	wbinvd();
+}
+
+static inline void wbnoinvd_on_all_cpus(void)
+{
+	wbnoinvd();
+}
+
+static inline void wbnoinvd_on_cpus_mask(struct cpumask *cpus)
+{
+	wbnoinvd();
 }
 
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
diff --git a/arch/x86/include/asm/special_insns.h b/arch/x86/include/asm/special_insns.h
index ecda17efa042..fde2bd7af19e 100644
--- a/arch/x86/include/asm/special_insns.h
+++ b/arch/x86/include/asm/special_insns.h
@@ -104,9 +104,36 @@ static inline void wrpkru(u32 pkru)
 }
 #endif
 
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, and then invalidate all caches. Depending
+ * on the micro-architecture, WBINVD (and WBNOINVD below) may or may not affect
+ * lower level caches associated with another logical processor that shares any
+ * level of this processor's cache hierarchy.
+ */
 static __always_inline void wbinvd(void)
 {
-	asm volatile("wbinvd": : :"memory");
+	asm volatile("wbinvd" : : : "memory");
+}
+
+/* Instruction encoding provided for binutils backwards compatibility. */
+#define ASM_WBNOINVD _ASM_BYTES(0xf3,0x0f,0x09)
+
+/*
+ * Write back all modified lines in all levels of cache associated with this
+ * logical processor to main memory, but do NOT explicitly invalidate caches,
+ * i.e. leave all/most cache lines in the hierarchy in non-modified state.
+ */
+static __always_inline void wbnoinvd(void)
+{
+	/*
+	 * Explicitly encode WBINVD if X86_FEATURE_WBNOINVD is unavailable even
+	 * though WBNOINVD is backwards compatible (it's simply WBINVD with an
+	 * ignored REP prefix), to guarantee that WBNOINVD isn't used if it
+	 * needs to be avoided for any reason. For all supported usage in the
+	 * kernel, WBINVD is functionally a superset of WBNOINVD.
+	 */
+	alternative("wbinvd", ASM_WBNOINVD, X86_FEATURE_WBNOINVD);
 }
 
 static inline unsigned long __read_cr4(void)
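Usage note (not part of the commit): a minimal sketch of how a caller might use the cache write-back helpers that this diff adds to <asm/smp.h> and <asm/special_insns.h>. The function example_writeback_caches() is a hypothetical name invented for illustration; only the wbinvd/wbnoinvd helpers themselves come from the patch above.

	/* Illustrative sketch only -- not taken from the diff above. */
	#include <linux/cpumask.h>
	#include <asm/smp.h>

	/*
	 * Write back dirty cache lines before handing memory to another
	 * agent, preferring WBNOINVD so clean lines stay resident. The
	 * wbnoinvd() helper itself falls back to WBINVD via alternative()
	 * when X86_FEATURE_WBNOINVD is not available.
	 */
	static void example_writeback_caches(struct cpumask *cpus)
	{
		if (cpus)
			wbnoinvd_on_cpus_mask(cpus);	/* only the given CPUs */
		else
			wbnoinvd_on_all_cpus();		/* every online CPU */
	}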