Diffstat (limited to 'include/asm-generic/mshyperv.h')
| -rw-r--r-- | include/asm-generic/mshyperv.h | 161 |
1 file changed, 108 insertions, 53 deletions
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 8fe7aaab2599..ecedab554c80 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -6,9 +6,8 @@
  * independent. See arch/<arch>/include/asm/mshyperv.h for definitions
  * that are specific to architecture <arch>.
  *
- * Definitions that are specified in the Hyper-V Top Level Functional
- * Spec (TLFS) should not go in this file, but should instead go in
- * hyperv-tlfs.h.
+ * Definitions that are derived from Hyper-V code or headers should not go in
+ * this file, but should instead go in the relevant files in include/hyperv.
  *
  * Copyright (C) 2019, Microsoft, Inc.
  *
@@ -25,13 +24,20 @@
 #include <linux/cpumask.h>
 #include <linux/nmi.h>
 #include <asm/ptrace.h>
-#include <asm/hyperv-tlfs.h>
+#include <hyperv/hvhdk.h>
 
 #define VTPM_BASE_ADDRESS 0xfed40000
 
+enum hv_partition_type {
+	HV_PARTITION_TYPE_GUEST,
+	HV_PARTITION_TYPE_ROOT,
+	HV_PARTITION_TYPE_L1VH,
+};
+
 struct ms_hyperv_info {
 	u32 features;
 	u32 priv_high;
+	u32 ext_features;
 	u32 misc_features;
 	u32 hints;
 	u32 nested_features;
@@ -56,18 +62,37 @@ struct ms_hyperv_info {
 		};
 	};
 	u64 shared_gpa_boundary;
+	bool msi_ext_dest_id;
+	bool confidential_vmbus_available;
 };
 
 extern struct ms_hyperv_info ms_hyperv;
 extern bool hv_nested;
+extern u64 hv_current_partition_id;
+extern enum hv_partition_type hv_curr_partition_type;
 extern void * __percpu *hyperv_pcpu_input_arg;
 extern void * __percpu *hyperv_pcpu_output_arg;
 
-extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
-extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
+u64 hv_do_fast_hypercall8(u16 control, u64 input8);
+u64 hv_do_fast_hypercall16(u16 control, u64 input1, u64 input2);
+
 bool hv_isolation_type_snp(void);
 bool hv_isolation_type_tdx(void);
 
+/*
+ * On architectures where Hyper-V doesn't support AEOI (e.g., ARM64),
+ * it doesn't provide a recommendation flag and AEOI must be disabled.
+ */
+static inline bool hv_recommend_using_aeoi(void)
+{
+#ifdef HV_DEPRECATING_AEOI_RECOMMENDED
+	return !(ms_hyperv.hints & HV_DEPRECATING_AEOI_RECOMMENDED);
+#else
+	return false;
+#endif
+}
+
 static inline struct hv_proximity_domain_info hv_numa_node_to_pxm_info(int node)
 {
 	struct hv_proximity_domain_info pxm_info = {};
@@ -101,10 +126,12 @@ static inline unsigned int hv_repcomp(u64 status)
 
 /*
  * Rep hypercalls. Callers of this functions are supposed to ensure that
- * rep_count and varhead_size comply with Hyper-V hypercall definition.
+ * rep_count, varhead_size, and rep_start comply with Hyper-V hypercall
+ * definition.
  */
-static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
-				      void *input, void *output)
+static inline u64 hv_do_rep_hypercall_ex(u16 code, u16 rep_count,
+					 u16 varhead_size, u16 rep_start,
+					 void *input, void *output)
 {
 	u64 control = code;
 	u64 status;
@@ -112,6 +139,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
 
 	control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
 	control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+	control |= (u64)rep_start << HV_HYPERCALL_REP_START_OFFSET;
 
 	do {
 		status = hv_do_hypercall(control, input, output);
@@ -129,6 +157,14 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
 	return status;
 }
 
+/* For the typical case where rep_start is 0 */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+				      void *input, void *output)
+{
+	return hv_do_rep_hypercall_ex(code, rep_count, varhead_size, 0,
+				      input, output);
+}
+
 /* Generate the guest OS identifier as described in the Hyper-V TLFS */
 static inline u64 hv_generate_guest_id(u64 kernel_version)
 {
@@ -140,41 +176,6 @@ static inline u64 hv_generate_guest_id(u64 kernel_version)
 	return guest_id;
 }
 
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
-	/*
-	 * On crash we're reading some other CPU's message page and we need
-	 * to be careful: this other CPU may already had cleared the header
-	 * and the host may already had delivered some other message there.
-	 * In case we blindly write msg->header.message_type we're going
-	 * to lose it. We can still lose a message of the same type but
-	 * we count on the fact that there can only be one
-	 * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
-	 * on crash.
-	 */
-	if (cmpxchg(&msg->header.message_type, old_msg_type,
-		    HVMSG_NONE) != old_msg_type)
-		return;
-
-	/*
-	 * The cmxchg() above does an implicit memory barrier to
-	 * ensure the write to MessageType (ie set to
-	 * HVMSG_NONE) happens before we read the
-	 * MessagePending and EOMing. Otherwise, the EOMing
-	 * will not deliver any more messages since there is
-	 * no empty slot
-	 */
-	if (msg->header.message_flags.msg_pending) {
-		/*
-		 * This will cause message queue rescan to
-		 * possibly deliver another msg from the
-		 * hypervisor
-		 */
-		hv_set_msr(HV_MSR_EOM, 0);
-	}
-}
-
 int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
 
 void hv_setup_vmbus_handler(void (*handler)(void));
@@ -186,11 +187,7 @@ void hv_setup_kexec_handler(void (*handler)(void));
 void hv_remove_kexec_handler(void);
 void hv_setup_crash_handler(void (*handler)(struct pt_regs *regs));
 void hv_remove_crash_handler(void);
-
-extern int vmbus_interrupt;
-extern int vmbus_irq;
-
-extern bool hv_root_partition;
+void hv_setup_mshv_handler(void (*handler)(void));
 
 #if IS_ENABLED(CONFIG_HYPERV)
 /*
@@ -208,14 +205,12 @@ extern u64 (*hv_read_reference_counter)(void);
 #define VP_INVAL	U32_MAX
 
 int __init hv_common_init(void);
+void __init hv_get_partition_id(void);
 void __init hv_common_free(void);
 void __init ms_hyperv_late_init(void);
 int hv_common_cpu_init(unsigned int cpu);
 int hv_common_cpu_die(unsigned int cpu);
-
-void *hv_alloc_hyperv_page(void);
-void *hv_alloc_hyperv_zeroed_page(void);
-void hv_free_hyperv_page(void *addr);
+void hv_identify_partition_type(void);
 
 /**
  * hv_cpu_number_to_vp_number() - Map CPU to VP.
@@ -292,6 +287,20 @@ static inline int cpumask_to_vpset_skip(struct hv_vpset *vpset,
 	return __cpumask_to_vpset(vpset, cpus, func);
 }
 
+#define _hv_status_fmt(fmt) "%s: Hyper-V status: %#x = %s: " fmt
+#define hv_status_printk(level, status, fmt, ...)			\
+do {									\
+	u64 __status = (status);					\
+	pr_##level(_hv_status_fmt(fmt), __func__, hv_result(__status),	\
+		   hv_result_to_string(__status), ##__VA_ARGS__);	\
+} while (0)
+#define hv_status_err(status, fmt, ...)	\
+	hv_status_printk(err, status, fmt, ##__VA_ARGS__)
+#define hv_status_debug(status, fmt, ...)	\
+	hv_status_printk(debug, status, fmt, ##__VA_ARGS__)
+
+const char *hv_result_to_string(u64 hv_status);
+int hv_result_to_errno(u64 status);
 void hyperv_report_panic(struct pt_regs *regs, long err, bool in_die);
 bool hv_is_hyperv_initialized(void);
 bool hv_is_hibernation_supported(void);
@@ -300,10 +309,15 @@ bool hv_is_isolation_supported(void);
 bool hv_isolation_type_snp(void);
 u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
 u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
+void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set);
+void hv_para_set_sint_proxy(bool enable);
+u64 hv_para_get_synic_register(unsigned int reg);
+void hv_para_set_synic_register(unsigned int reg, u64 val);
 void hyperv_cleanup(void);
 bool hv_query_ext_cap(u64 cap_query);
 void hv_setup_dma_ops(struct device *dev, bool coherent);
 #else /* CONFIG_HYPERV */
+static inline void hv_identify_partition_type(void) {}
 static inline bool hv_is_hyperv_initialized(void) { return false; }
 static inline bool hv_is_hibernation_supported(void) { return false; }
 static inline void hyperv_cleanup(void) {}
@@ -315,4 +329,45 @@ static inline enum hv_isolation_type hv_get_isolation_type(void)
 }
 #endif /* CONFIG_HYPERV */
 
+#if IS_ENABLED(CONFIG_MSHV_ROOT)
+static inline bool hv_root_partition(void)
+{
+	return hv_curr_partition_type == HV_PARTITION_TYPE_ROOT;
+}
+static inline bool hv_l1vh_partition(void)
+{
+	return hv_curr_partition_type == HV_PARTITION_TYPE_L1VH;
+}
+static inline bool hv_parent_partition(void)
+{
+	return hv_root_partition() || hv_l1vh_partition();
+}
+int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages);
+int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id);
+int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags);
+
+#else /* CONFIG_MSHV_ROOT */
+static inline bool hv_root_partition(void) { return false; }
+static inline bool hv_l1vh_partition(void) { return false; }
+static inline bool hv_parent_partition(void) { return false; }
+static inline int hv_call_deposit_pages(int node, u64 partition_id, u32 num_pages)
+{
+	return -EOPNOTSUPP;
+}
+static inline int hv_call_add_logical_proc(int node, u32 lp_index, u32 acpi_id)
+{
+	return -EOPNOTSUPP;
+}
+static inline int hv_call_create_vp(int node, u64 partition_id, u32 vp_index, u32 flags)
+{
+	return -EOPNOTSUPP;
+}
+#endif /* CONFIG_MSHV_ROOT */
+
+#if IS_ENABLED(CONFIG_HYPERV_VTL_MODE)
+u8 __init get_vtl(void);
+#else
+static inline u8 get_vtl(void) { return 0; }
+#endif
+
 #endif
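
For context, a minimal sketch (not part of the commit above) of how a caller might combine the hv_do_rep_hypercall() wrapper with the new hv_status_err() and hv_result_to_errno() helpers. HVCALL_EXAMPLE, example_flush_list() and its page arguments are hypothetical placeholders, and hv_result_success() is assumed to be the helper already defined elsewhere in this header.

```c
/*
 * Illustrative sketch only -- not part of the patch above.
 * HVCALL_EXAMPLE and example_flush_list() are hypothetical.
 */
static int example_flush_list(void *input_page, void *output_page, u16 nelements)
{
	u64 status;

	/* rep_start is 0 for a fresh request, so the plain wrapper suffices */
	status = hv_do_rep_hypercall(HVCALL_EXAMPLE, nelements, 0,
				     input_page, output_page);
	if (!hv_result_success(status)) {
		/* Logs the caller, the raw status code and its symbolic name */
		hv_status_err(status, "rep hypercall failed\n");
		return hv_result_to_errno(status);
	}

	return 0;
}
```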
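Similarly, a hedged sketch of how the new partition-type accessors and the CONFIG_MSHV_ROOT stubs are meant to compose; example_bring_up_vp() and its arguments are hypothetical. With CONFIG_MSHV_ROOT=n, hv_parent_partition() is a stub returning false, so the compiler can drop the hv_call_create_vp() branch, while the -EOPNOTSUPP stubs keep the call site compiling either way.

```c
/*
 * Illustrative sketch only -- not part of the patch above.
 * example_bring_up_vp() and its arguments are hypothetical.
 */
static int example_bring_up_vp(int node, u64 partition_id, u32 vp_index)
{
	/* Only a parent partition (root or L1VH) issues this hypercall */
	if (!hv_parent_partition())
		return 0;

	return hv_call_create_vp(node, partition_id, vp_index, 0);
}
```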
