Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/mshyperv.h           |  63
-rw-r--r--  include/hyperv/hvgdk_mini.h              | 115
-rw-r--r--  include/hyperv/hvhdk.h                   |  46
-rw-r--r--  include/hyperv/hvhdk_mini.h              | 128
-rw-r--r--  include/linux/blk-mq.h                   |  18
-rw-r--r--  include/linux/blk_types.h                |   5
-rw-r--r--  include/linux/compiler_types.h           |   8
-rw-r--r--  include/linux/f2fs_fs.h                  |   5
-rw-r--r--  include/linux/hyperv.h                   |  69
-rw-r--r--  include/linux/i3c/device.h               |  42
-rw-r--r--  include/linux/i3c/master.h               |  10
-rw-r--r--  include/linux/phy/phy.h                  |  19
-rw-r--r--  include/linux/pinctrl/pinconf-generic.h  |  19
-rw-r--r--  include/linux/pinctrl/pinmux.h           |  10
-rw-r--r--  include/linux/static_call_types.h        |   4
-rw-r--r--  include/trace/events/f2fs.h              |  59
-rw-r--r--  include/trace/events/io_uring.h          |  12
-rw-r--r--  include/uapi/linux/mshv.h                | 116
-rw-r--r--  include/uapi/linux/pr.h                  |  14
19 files changed, 658 insertions, 104 deletions
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 64ba6bc807d9..ecedab554c80 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -62,6 +62,8 @@ struct ms_hyperv_info {
};
};
u64 shared_gpa_boundary;
+ bool msi_ext_dest_id;
+ bool confidential_vmbus_available;
};
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;
@@ -124,10 +126,12 @@ static inline unsigned int hv_repcomp(u64 status)
/*
* Rep hypercalls. Callers of this functions are supposed to ensure that
- * rep_count and varhead_size comply with Hyper-V hypercall definition.
+ * rep_count, varhead_size, and rep_start comply with Hyper-V hypercall
+ * definition.
*/
-static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
- void *input, void *output)
+static inline u64 hv_do_rep_hypercall_ex(u16 code, u16 rep_count,
+ u16 varhead_size, u16 rep_start,
+ void *input, void *output)
{
u64 control = code;
u64 status;
@@ -135,6 +139,7 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
control |= (u64)varhead_size << HV_HYPERCALL_VARHEAD_OFFSET;
control |= (u64)rep_count << HV_HYPERCALL_REP_COMP_OFFSET;
+ control |= (u64)rep_start << HV_HYPERCALL_REP_START_OFFSET;
do {
status = hv_do_hypercall(control, input, output);
@@ -152,6 +157,14 @@ static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
return status;
}
+/* For the typical case where rep_start is 0 */
+static inline u64 hv_do_rep_hypercall(u16 code, u16 rep_count, u16 varhead_size,
+ void *input, void *output)
+{
+ return hv_do_rep_hypercall_ex(code, rep_count, varhead_size, 0,
+ input, output);
+}
+
/* Generate the guest OS identifier as described in the Hyper-V TLFS */
static inline u64 hv_generate_guest_id(u64 kernel_version)
{
@@ -163,46 +176,6 @@ static inline u64 hv_generate_guest_id(u64 kernel_version)
return guest_id;
}
-#if IS_ENABLED(CONFIG_HYPERV_VMBUS)
-/* Free the message slot and signal end-of-message if required */
-static inline void vmbus_signal_eom(struct hv_message *msg, u32 old_msg_type)
-{
- /*
- * On crash we're reading some other CPU's message page and we need
- * to be careful: this other CPU may already had cleared the header
- * and the host may already had delivered some other message there.
- * In case we blindly write msg->header.message_type we're going
- * to lose it. We can still lose a message of the same type but
- * we count on the fact that there can only be one
- * CHANNELMSG_UNLOAD_RESPONSE and we don't care about other messages
- * on crash.
- */
- if (cmpxchg(&msg->header.message_type, old_msg_type,
- HVMSG_NONE) != old_msg_type)
- return;
-
- /*
- * The cmxchg() above does an implicit memory barrier to
- * ensure the write to MessageType (ie set to
- * HVMSG_NONE) happens before we read the
- * MessagePending and EOMing. Otherwise, the EOMing
- * will not deliver any more messages since there is
- * no empty slot
- */
- if (msg->header.message_flags.msg_pending) {
- /*
- * This will cause message queue rescan to
- * possibly deliver another msg from the
- * hypervisor
- */
- hv_set_msr(HV_MSR_EOM, 0);
- }
-}
-
-extern int vmbus_interrupt;
-extern int vmbus_irq;
-#endif /* CONFIG_HYPERV_VMBUS */
-
int hv_get_hypervisor_version(union hv_hypervisor_version_info *info);
void hv_setup_vmbus_handler(void (*handler)(void));
@@ -336,6 +309,10 @@ bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
+void hv_enable_coco_interrupt(unsigned int cpu, unsigned int vector, bool set);
+void hv_para_set_sint_proxy(bool enable);
+u64 hv_para_get_synic_register(unsigned int reg);
+void hv_para_set_synic_register(unsigned int reg, u64 val);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
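A minimal caller-side sketch of how the new rep_start parameter composes with the unchanged wrapper (hypothetical, not part of this diff; the hypercall code, buffers and counts are placeholders):

static u64 demo_rep_calls(void *input, void *output, u16 rep_count, u16 first_rep)
{
	u64 status;

	/* Typical case: start at rep index 0 via the unchanged wrapper. */
	status = hv_do_rep_hypercall(HVCALL_GET_VP_INDEX_FROM_APIC_ID,
				     rep_count, 0, input, output);
	if (!hv_result_success(status))
		return status;

	/* New case: start the repetition at a nonzero index via the _ex variant. */
	return hv_do_rep_hypercall_ex(HVCALL_GET_VP_INDEX_FROM_APIC_ID,
				      rep_count, 0, first_rep, input, output);
}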
diff --git a/include/hyperv/hvgdk_mini.h b/include/hyperv/hvgdk_mini.h
index 77abddfc750e..04b18d0e37af 100644
--- a/include/hyperv/hvgdk_mini.h
+++ b/include/hyperv/hvgdk_mini.h
@@ -260,6 +260,7 @@ union hv_hypervisor_version_info {
#define HYPERV_CPUID_VIRT_STACK_PROPERTIES 0x40000082
/* Support for the extended IOAPIC RTE format */
#define HYPERV_VS_PROPERTIES_EAX_EXTENDED_IOAPIC_RTE BIT(2)
+#define HYPERV_VS_PROPERTIES_EAX_CONFIDENTIAL_VMBUS_AVAILABLE BIT(3)
#define HYPERV_HYPERVISOR_PRESENT_BIT 0x80000000
#define HYPERV_CPUID_MIN 0x40000005
@@ -464,18 +465,21 @@ union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */
#define HVCALL_RESET_DEBUG_SESSION 0x006b
#define HVCALL_MAP_STATS_PAGE 0x006c
#define HVCALL_UNMAP_STATS_PAGE 0x006d
+#define HVCALL_SET_SYSTEM_PROPERTY 0x006f
#define HVCALL_ADD_LOGICAL_PROCESSOR 0x0076
#define HVCALL_GET_SYSTEM_PROPERTY 0x007b
#define HVCALL_MAP_DEVICE_INTERRUPT 0x007c
#define HVCALL_UNMAP_DEVICE_INTERRUPT 0x007d
#define HVCALL_RETARGET_INTERRUPT 0x007e
+#define HVCALL_ENTER_SLEEP_STATE 0x0084
+#define HVCALL_NOTIFY_PARTITION_EVENT 0x0087
#define HVCALL_NOTIFY_PORT_RING_EMPTY 0x008b
#define HVCALL_REGISTER_INTERCEPT_RESULT 0x0091
#define HVCALL_ASSERT_VIRTUAL_INTERRUPT 0x0094
#define HVCALL_CREATE_PORT 0x0095
#define HVCALL_CONNECT_PORT 0x0096
#define HVCALL_START_VP 0x0099
-#define HVCALL_GET_VP_INDEX_FROM_APIC_ID 0x009a
+#define HVCALL_GET_VP_INDEX_FROM_APIC_ID 0x009a
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_SPACE 0x00af
#define HVCALL_FLUSH_GUEST_PHYSICAL_ADDRESS_LIST 0x00b0
#define HVCALL_SIGNAL_EVENT_DIRECT 0x00c0
@@ -490,8 +494,11 @@ union hv_vp_assist_msr_contents { /* HV_REGISTER_VP_ASSIST_PAGE */
#define HVCALL_GET_VP_STATE 0x00e3
#define HVCALL_SET_VP_STATE 0x00e4
#define HVCALL_GET_VP_CPUID_VALUES 0x00f4
+#define HVCALL_GET_PARTITION_PROPERTY_EX 0x0101
#define HVCALL_MMIO_READ 0x0106
#define HVCALL_MMIO_WRITE 0x0107
+#define HVCALL_DISABLE_HYP_EX 0x010f
+#define HVCALL_MAP_STATS_PAGE2 0x0131
/* HV_HYPERCALL_INPUT */
#define HV_HYPERCALL_RESULT_MASK GENMASK_ULL(15, 0)
@@ -880,6 +887,48 @@ struct hv_get_vp_from_apic_id_in {
u32 apic_ids[];
} __packed;
+union hv_register_vsm_partition_config {
+ u64 as_uint64;
+ struct {
+ u64 enable_vtl_protection : 1;
+ u64 default_vtl_protection_mask : 4;
+ u64 zero_memory_on_reset : 1;
+ u64 deny_lower_vtl_startup : 1;
+ u64 intercept_acceptance : 1;
+ u64 intercept_enable_vtl_protection : 1;
+ u64 intercept_vp_startup : 1;
+ u64 intercept_cpuid_unimplemented : 1;
+ u64 intercept_unrecoverable_exception : 1;
+ u64 intercept_page : 1;
+ u64 mbz : 51;
+ } __packed;
+};
+
+union hv_register_vsm_capabilities {
+ u64 as_uint64;
+ struct {
+ u64 dr6_shared: 1;
+ u64 mbec_vtl_mask: 16;
+ u64 deny_lower_vtl_startup: 1;
+ u64 supervisor_shadow_stack: 1;
+ u64 hardware_hvpt_available: 1;
+ u64 software_hvpt_available: 1;
+ u64 hardware_hvpt_range_bits: 6;
+ u64 intercept_page_available: 1;
+ u64 return_action_available: 1;
+ u64 reserved: 35;
+ } __packed;
+};
+
+union hv_register_vsm_page_offsets {
+ struct {
+ u64 vtl_call_offset : 12;
+ u64 vtl_return_offset : 12;
+ u64 reserved_mbz : 40;
+ } __packed;
+ u64 as_uint64;
+};
+
struct hv_nested_enlightenments_control {
struct {
u32 directhypercall : 1;
@@ -1002,6 +1051,70 @@ enum hv_register_name {
/* VSM */
HV_REGISTER_VSM_VP_STATUS = 0x000D0003,
+
+ /* Synthetic VSM registers */
+ HV_REGISTER_VSM_CODE_PAGE_OFFSETS = 0x000D0002,
+ HV_REGISTER_VSM_CAPABILITIES = 0x000D0006,
+ HV_REGISTER_VSM_PARTITION_CONFIG = 0x000D0007,
+
+#if defined(CONFIG_X86)
+ /* X64 Debug Registers */
+ HV_X64_REGISTER_DR0 = 0x00050000,
+ HV_X64_REGISTER_DR1 = 0x00050001,
+ HV_X64_REGISTER_DR2 = 0x00050002,
+ HV_X64_REGISTER_DR3 = 0x00050003,
+ HV_X64_REGISTER_DR6 = 0x00050004,
+ HV_X64_REGISTER_DR7 = 0x00050005,
+
+ /* X64 Cache control MSRs */
+ HV_X64_REGISTER_MSR_MTRR_CAP = 0x0008000D,
+ HV_X64_REGISTER_MSR_MTRR_DEF_TYPE = 0x0008000E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE0 = 0x00080010,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE1 = 0x00080011,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE2 = 0x00080012,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE3 = 0x00080013,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE4 = 0x00080014,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE5 = 0x00080015,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE6 = 0x00080016,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE7 = 0x00080017,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE8 = 0x00080018,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASE9 = 0x00080019,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEA = 0x0008001A,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEB = 0x0008001B,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEC = 0x0008001C,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASED = 0x0008001D,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEE = 0x0008001E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_BASEF = 0x0008001F,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK0 = 0x00080040,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK1 = 0x00080041,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK2 = 0x00080042,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK3 = 0x00080043,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK4 = 0x00080044,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK5 = 0x00080045,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK6 = 0x00080046,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK7 = 0x00080047,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK8 = 0x00080048,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASK9 = 0x00080049,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKA = 0x0008004A,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKB = 0x0008004B,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKC = 0x0008004C,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKD = 0x0008004D,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKE = 0x0008004E,
+ HV_X64_REGISTER_MSR_MTRR_PHYS_MASKF = 0x0008004F,
+ HV_X64_REGISTER_MSR_MTRR_FIX64K00000 = 0x00080070,
+ HV_X64_REGISTER_MSR_MTRR_FIX16K80000 = 0x00080071,
+ HV_X64_REGISTER_MSR_MTRR_FIX16KA0000 = 0x00080072,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KC0000 = 0x00080073,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KC8000 = 0x00080074,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KD0000 = 0x00080075,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KD8000 = 0x00080076,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KE0000 = 0x00080077,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KE8000 = 0x00080078,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KF0000 = 0x00080079,
+ HV_X64_REGISTER_MSR_MTRR_FIX4KF8000 = 0x0008007A,
+
+ HV_X64_REGISTER_REG_PAGE = 0x0009001C,
+#endif
};
/*
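For illustration only (not in the diff), the new VSM register layouts can be composed through their bitfields before being written with the usual register accessors; a hypothetical helper:

static u64 demo_vsm_partition_config(void)
{
	union hv_register_vsm_partition_config cfg = { .as_uint64 = 0 };

	cfg.enable_vtl_protection = 1;
	cfg.intercept_vp_startup = 1;

	return cfg.as_uint64;
}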
diff --git a/include/hyperv/hvhdk.h b/include/hyperv/hvhdk.h
index b4067ada02cf..469186df7826 100644
--- a/include/hyperv/hvhdk.h
+++ b/include/hyperv/hvhdk.h
@@ -376,6 +376,46 @@ struct hv_input_set_partition_property {
u64 property_value;
} __packed;
+union hv_partition_property_arg {
+ u64 as_uint64;
+ struct {
+ union {
+ u32 arg;
+ u32 vp_index;
+ };
+ u16 reserved0;
+ u8 reserved1;
+ u8 object_type;
+ } __packed;
+};
+
+struct hv_input_get_partition_property_ex {
+ u64 partition_id;
+ u32 property_code; /* enum hv_partition_property_code */
+ u32 padding;
+ union {
+ union hv_partition_property_arg arg_data;
+ u64 arg;
+ };
+} __packed;
+
+/*
+ * NOTE: Should use hv_input_set_partition_property_ex_header to compute this
+ * size, but hv_input_get_partition_property_ex is identical so it suffices
+ */
+#define HV_PARTITION_PROPERTY_EX_MAX_VAR_SIZE \
+ (HV_HYP_PAGE_SIZE - sizeof(struct hv_input_get_partition_property_ex))
+
+union hv_partition_property_ex {
+ u8 buffer[HV_PARTITION_PROPERTY_EX_MAX_VAR_SIZE];
+ struct hv_partition_property_vmm_capabilities vmm_capabilities;
+ /* More fields to be filled in when needed */
+};
+
+struct hv_output_get_partition_property_ex {
+ union hv_partition_property_ex property_value;
+} __packed;
+
enum hv_vp_state_page_type {
HV_VP_STATE_PAGE_REGISTERS = 0,
HV_VP_STATE_PAGE_INTERCEPT_MESSAGE = 1,
@@ -539,9 +579,15 @@ union hv_interrupt_control {
u64 as_uint64;
struct {
u32 interrupt_type; /* enum hv_interrupt_type */
+#if IS_ENABLED(CONFIG_X86)
u32 level_triggered : 1;
u32 logical_dest_mode : 1;
u32 rsvd : 30;
+#elif IS_ENABLED(CONFIG_ARM64)
+ u32 rsvd1 : 2;
+ u32 asserted : 1;
+ u32 rsvd2 : 29;
+#endif
} __packed;
};
diff --git a/include/hyperv/hvhdk_mini.h b/include/hyperv/hvhdk_mini.h
index 858f6a3925b3..41a29bf8ec14 100644
--- a/include/hyperv/hvhdk_mini.h
+++ b/include/hyperv/hvhdk_mini.h
@@ -96,8 +96,34 @@ enum hv_partition_property_code {
HV_PARTITION_PROPERTY_XSAVE_STATES = 0x00060007,
HV_PARTITION_PROPERTY_MAX_XSAVE_DATA_SIZE = 0x00060008,
HV_PARTITION_PROPERTY_PROCESSOR_CLOCK_FREQUENCY = 0x00060009,
+
+ /* Extended properties with larger property values */
+ HV_PARTITION_PROPERTY_VMM_CAPABILITIES = 0x00090007,
};
+#define HV_PARTITION_VMM_CAPABILITIES_BANK_COUNT 1
+#define HV_PARTITION_VMM_CAPABILITIES_RESERVED_BITFIELD_COUNT 59
+
+struct hv_partition_property_vmm_capabilities {
+ u16 bank_count;
+ u16 reserved[3];
+ union {
+ u64 as_uint64[HV_PARTITION_VMM_CAPABILITIES_BANK_COUNT];
+ struct {
+ u64 map_gpa_preserve_adjustable: 1;
+ u64 vmm_can_provide_overlay_gpfn: 1;
+ u64 vp_affinity_property: 1;
+#if IS_ENABLED(CONFIG_ARM64)
+ u64 vmm_can_provide_gic_overlay_locations: 1;
+#else
+ u64 reservedbit3: 1;
+#endif
+ u64 assignable_synthetic_proc_features: 1;
+ u64 reserved0: HV_PARTITION_VMM_CAPABILITIES_RESERVED_BITFIELD_COUNT;
+ } __packed;
+ };
+} __packed;
+
enum hv_snp_status {
HV_SNP_STATUS_NONE = 0,
HV_SNP_STATUS_AVAILABLE = 1,
@@ -114,8 +140,33 @@ enum hv_snp_status {
enum hv_system_property {
/* Add more values when needed */
+ HV_SYSTEM_PROPERTY_SLEEP_STATE = 3,
HV_SYSTEM_PROPERTY_SCHEDULER_TYPE = 15,
HV_DYNAMIC_PROCESSOR_FEATURE_PROPERTY = 21,
+ HV_SYSTEM_PROPERTY_CRASHDUMPAREA = 47,
+};
+
+#define HV_PFN_RANGE_PGBITS 24 /* HV_SPA_PAGE_RANGE_ADDITIONAL_PAGES_BITS */
+union hv_pfn_range { /* HV_SPA_PAGE_RANGE */
+ u64 as_uint64;
+ struct {
+ /* 39:0: base pfn. 63:40: additional pages */
+ u64 base_pfn : 64 - HV_PFN_RANGE_PGBITS;
+ u64 add_pfns : HV_PFN_RANGE_PGBITS;
+ } __packed;
+};
+
+enum hv_sleep_state {
+ HV_SLEEP_STATE_S1 = 1,
+ HV_SLEEP_STATE_S2 = 2,
+ HV_SLEEP_STATE_S3 = 3,
+ HV_SLEEP_STATE_S4 = 4,
+ HV_SLEEP_STATE_S5 = 5,
+ /*
+	 * After the hypervisor has received this, any follow-up sleep
+	 * state registration requests will be rejected.
+ */
+ HV_SLEEP_STATE_LOCK = 6
};
enum hv_dynamic_processor_feature_property {
@@ -142,15 +193,50 @@ struct hv_output_get_system_property {
#if IS_ENABLED(CONFIG_X86)
u64 hv_processor_feature_value;
#endif
+ union hv_pfn_range hv_cda_info; /* CrashdumpAreaAddress */
+ u64 hv_tramp_pa; /* CrashdumpTrampolineAddress */
+ };
+} __packed;
+
+struct hv_sleep_state_info {
+ u32 sleep_state; /* enum hv_sleep_state */
+ u8 pm1a_slp_typ;
+ u8 pm1b_slp_typ;
+} __packed;
+
+struct hv_input_set_system_property {
+ u32 property_id; /* enum hv_system_property */
+ u32 reserved;
+ union {
+ /* More fields to be filled in when needed */
+ struct hv_sleep_state_info set_sleep_state_info;
+
+ /*
+ * Add a reserved field to ensure the union is 8-byte aligned as
+ * existing members may not be. This is a temporary measure
+ * until all remaining members are added.
+ */
+ u64 reserved0[8];
};
} __packed;
+struct hv_input_enter_sleep_state { /* HV_INPUT_ENTER_SLEEP_STATE */
+ u32 sleep_state; /* enum hv_sleep_state */
+} __packed;
+
struct hv_input_map_stats_page {
u32 type; /* enum hv_stats_object_type */
u32 padding;
union hv_stats_object_identity identity;
} __packed;
+struct hv_input_map_stats_page2 {
+ u32 type; /* enum hv_stats_object_type */
+ u32 padding;
+ union hv_stats_object_identity identity;
+ u64 map_location;
+} __packed;
+
struct hv_output_map_stats_page {
u64 map_location;
} __packed;
@@ -234,6 +320,48 @@ union hv_gpa_page_access_state {
u8 as_uint8;
} __packed;
+enum hv_crashdump_action {
+ HV_CRASHDUMP_NONE = 0,
+ HV_CRASHDUMP_SUSPEND_ALL_VPS,
+ HV_CRASHDUMP_PREPARE_FOR_STATE_SAVE,
+ HV_CRASHDUMP_STATE_SAVED,
+ HV_CRASHDUMP_ENTRY,
+};
+
+struct hv_partition_event_root_crashdump_input {
+ u32 crashdump_action; /* enum hv_crashdump_action */
+} __packed;
+
+struct hv_input_disable_hyp_ex { /* HV_X64_INPUT_DISABLE_HYPERVISOR_EX */
+ u64 rip;
+ u64 arg;
+} __packed;
+
+struct hv_crashdump_area { /* HV_CRASHDUMP_AREA */
+ u32 version;
+ union {
+ u32 flags_as_uint32;
+ struct {
+ u32 cda_valid : 1;
+ u32 cda_unused : 31;
+ } __packed;
+ };
+ /* more unused fields */
+} __packed;
+
+union hv_partition_event_input {
+ struct hv_partition_event_root_crashdump_input crashdump_input;
+};
+
+enum hv_partition_event {
+ HV_PARTITION_EVENT_ROOT_CRASHDUMP = 2,
+};
+
+struct hv_input_notify_partition_event {
+ u32 event; /* enum hv_partition_event */
+ union hv_partition_event_input input;
+} __packed;
+
struct hv_lp_startup_status {
u64 hv_status;
u64 substatus1;
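A hedged sketch (hypothetical helper, not part of this diff) of filling the new set-system-property input for a sleep-state registration; the PM1 SLP_TYP values would come from ACPI, and the filled buffer would then be passed to HVCALL_SET_SYSTEM_PROPERTY:

#include <linux/string.h>

static void demo_fill_sleep_state(struct hv_input_set_system_property *in,
				  u8 pm1a_slp_typ, u8 pm1b_slp_typ)
{
	memset(in, 0, sizeof(*in));
	in->property_id = HV_SYSTEM_PROPERTY_SLEEP_STATE;
	in->set_sleep_state_info.sleep_state = HV_SLEEP_STATE_S4;
	in->set_sleep_state_info.pm1a_slp_typ = pm1a_slp_typ;
	in->set_sleep_state_info.pm1b_slp_typ = pm1b_slp_typ;
}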
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index eb7254b3dddd..cae9e857aea4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -1213,6 +1213,24 @@ static inline unsigned short blk_rq_nr_discard_segments(struct request *rq)
return max_t(unsigned short, rq->nr_phys_segments, 1);
}
+/**
+ * blk_rq_nr_bvec - return number of bvecs in a request
+ * @rq: request to calculate bvecs for
+ *
+ * Returns the number of bvecs.
+ */
+static inline unsigned int blk_rq_nr_bvec(struct request *rq)
+{
+ struct req_iterator rq_iter;
+ struct bio_vec bv;
+ unsigned int nr_bvec = 0;
+
+ rq_for_each_bvec(bv, rq, rq_iter)
+ nr_bvec++;
+
+ return nr_bvec;
+}
+
int __blk_rq_map_sg(struct request *rq, struct scatterlist *sglist,
struct scatterlist **last_sg);
static inline int blk_rq_map_sg(struct request *rq, struct scatterlist *sglist)
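A hypothetical driver-side use of the new helper (not part of this diff): size a flat bvec array up front, then fill it with the existing iterator:

#include <linux/blk-mq.h>
#include <linux/slab.h>

static struct bio_vec *demo_collect_bvecs(struct request *rq, unsigned int *nr)
{
	struct req_iterator iter;
	struct bio_vec bv, *bvecs;
	unsigned int i = 0;

	*nr = blk_rq_nr_bvec(rq);
	bvecs = kmalloc_array(*nr, sizeof(*bvecs), GFP_NOIO);
	if (!bvecs)
		return NULL;

	rq_for_each_bvec(bv, rq, iter)
		bvecs[i++] = bv;

	return bvecs;
}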
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index cbbcb9051ec3..5dc061d318a4 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -479,10 +479,7 @@ static inline bool op_is_discard(blk_opf_t op)
}
/*
- * Check if a bio or request operation is a zone management operation, with
- * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
- * due to its different handling in the block layer and device response in
- * case of command failure.
+ * Check if a bio or request operation is a zone management operation.
*/
static inline bool op_is_zone_mgmt(enum req_op op)
{
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 3eac51d68426..45875903960e 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -11,6 +11,10 @@
#define __has_builtin(x) (0)
#endif
+/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
+#define ___PASTE(a, b) a##b
+#define __PASTE(a, b) ___PASTE(a, b)
+
#ifndef __ASSEMBLY__
/*
@@ -79,10 +83,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __builtin_warning(x, y...) (1)
#endif /* __CHECKER__ */
-/* Indirect macros required for expanded argument pasting, eg. __LINE__. */
-#define ___PASTE(a,b) a##b
-#define __PASTE(a,b) ___PASTE(a,b)
-
#ifdef __KERNEL__
/* Attributes */
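The move only changes where the pasting helpers are visible; as a reminder of why the double expansion exists, a small hypothetical use with __LINE__:

/* Without the ___PASTE indirection, __LINE__ would be pasted unexpanded. */
#define DEMO_UNIQUE(prefix) __PASTE(prefix, __LINE__)

static int DEMO_UNIQUE(demo_counter_);	/* e.g. demo_counter_42 on line 42 */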
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 6afb4a13b81d..a7880787cad3 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -17,6 +17,7 @@
#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
+#define F2FS_SUM_BLKSIZE 4096 /* only support 4096 byte sum block */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
@@ -441,7 +442,7 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
+#define ENTRIES_IN_SUM (F2FS_SUM_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct f2fs_summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
@@ -467,7 +468,7 @@ struct summary_footer {
__le32 check_sum; /* summary checksum */
} __packed;
-#define SUM_JOURNAL_SIZE (F2FS_BLKSIZE - SUM_FOOTER_SIZE -\
+#define SUM_JOURNAL_SIZE (F2FS_SUM_BLKSIZE - SUM_FOOTER_SIZE -\
SUM_ENTRY_SIZE)
#define NAT_JOURNAL_ENTRIES ((SUM_JOURNAL_SIZE - 2) /\
sizeof(struct nat_journal_entry))
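A quick arithmetic check of the derived constants with the fixed 4096-byte summary block (using the 7-byte summary and 5-byte footer sizes defined above):

/*
 * ENTRIES_IN_SUM   = 4096 / 8          = 512
 * SUM_ENTRY_SIZE   = 7 * 512           = 3584
 * SUM_JOURNAL_SIZE = 4096 - 5 - 3584   = 507
 *
 * i.e. the on-disk summary block layout no longer scales with PAGE_SIZE.
 */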
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 59826c89171c..dfc516c1c719 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -265,16 +265,18 @@ static inline u32 hv_get_avail_to_write_percent(
* Linux kernel.
*/
-#define VERSION_WS2008 ((0 << 16) | (13))
-#define VERSION_WIN7 ((1 << 16) | (1))
-#define VERSION_WIN8 ((2 << 16) | (4))
-#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
-#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
-#define VERSION_WIN10_V5 ((5 << 16) | (0))
-#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
-#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
-#define VERSION_WIN10_V5_3 ((5 << 16) | (3))
+#define VMBUS_MAKE_VERSION(MAJ, MIN) ((((u32)MAJ) << 16) | (MIN))
+#define VERSION_WS2008 VMBUS_MAKE_VERSION(0, 13)
+#define VERSION_WIN7 VMBUS_MAKE_VERSION(1, 1)
+#define VERSION_WIN8 VMBUS_MAKE_VERSION(2, 4)
+#define VERSION_WIN8_1 VMBUS_MAKE_VERSION(3, 0)
+#define VERSION_WIN10 VMBUS_MAKE_VERSION(4, 0)
+#define VERSION_WIN10_V4_1 VMBUS_MAKE_VERSION(4, 1)
+#define VERSION_WIN10_V5 VMBUS_MAKE_VERSION(5, 0)
+#define VERSION_WIN10_V5_1 VMBUS_MAKE_VERSION(5, 1)
+#define VERSION_WIN10_V5_2 VMBUS_MAKE_VERSION(5, 2)
+#define VERSION_WIN10_V5_3 VMBUS_MAKE_VERSION(5, 3)
+#define VERSION_WIN10_V6_0 VMBUS_MAKE_VERSION(6, 0)
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -335,14 +337,22 @@ struct vmbus_channel_offer {
} __packed;
/* Server Flags */
-#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 1
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_TRANSFER_PAGES 2
-#define VMBUS_CHANNEL_SERVER_SUPPORTS_GPADLS 4
-#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x10
-#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x100
-#define VMBUS_CHANNEL_PARENT_OFFER 0x200
-#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x400
-#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
+#define VMBUS_CHANNEL_ENUMERATE_DEVICE_INTERFACE 0x0001
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for the channel ring buffer.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER 0x0002
+/*
+ * This flag indicates that the channel is offered by the paravisor, and must
+ * use encrypted memory for GPA direct packets and additional GPADLs.
+ */
+#define VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY 0x0004
+#define VMBUS_CHANNEL_NAMED_PIPE_MODE 0x0010
+#define VMBUS_CHANNEL_LOOPBACK_OFFER 0x0100
+#define VMBUS_CHANNEL_PARENT_OFFER 0x0200
+#define VMBUS_CHANNEL_REQUEST_MONITORED_NOTIFICATION 0x0400
+#define VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER 0x2000
struct vmpacket_descriptor {
u16 type;
@@ -621,6 +631,12 @@ struct vmbus_channel_relid_released {
u32 child_relid;
} __packed;
+/*
+ * Used by the paravisor only, means that the encrypted ring buffers and
+ * the encrypted external memory are supported
+ */
+#define VMBUS_FEATURE_FLAG_CONFIDENTIAL_CHANNELS 0x10
+
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
@@ -630,7 +646,8 @@ struct vmbus_channel_initiate_contact {
struct {
u8 msg_sint;
u8 msg_vtl;
- u8 reserved[6];
+ u8 reserved[2];
+ u32 feature_flags; /* VMBus version 6.0 */
};
};
u64 monitor_page1;
@@ -1003,6 +1020,10 @@ struct vmbus_channel {
/* boolean to control visibility of sysfs for ring buffer */
bool ring_sysfs_visible;
+ /* The ring buffer is encrypted */
+ bool co_ring_buffer;
+ /* The external memory is encrypted */
+ bool co_external_memory;
};
#define lock_requestor(channel, flags) \
@@ -1027,6 +1048,16 @@ u64 vmbus_request_addr_match(struct vmbus_channel *channel, u64 trans_id,
u64 rqst_addr);
u64 vmbus_request_addr(struct vmbus_channel *channel, u64 trans_id);
+static inline bool is_co_ring_buffer(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_RING_BUFFER);
+}
+
+static inline bool is_co_external_memory(const struct vmbus_channel_offer_channel *o)
+{
+ return !!(o->offer.chn_flags & VMBUS_CHANNEL_CONFIDENTIAL_EXTERNAL_MEMORY);
+}
+
static inline bool is_hvsock_offer(const struct vmbus_channel_offer_channel *o)
{
return !!(o->offer.chn_flags & VMBUS_CHANNEL_TLNPI_PROVIDER_OFFER);
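A hypothetical offer-handling sketch (not part of this diff) showing how the two new helpers map onto the new channel fields:

static void demo_record_confidential(struct vmbus_channel *channel,
				     const struct vmbus_channel_offer_channel *offer)
{
	channel->co_ring_buffer = is_co_ring_buffer(offer);
	channel->co_external_memory = is_co_external_memory(offer);
}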
diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h
index 7f136de4b73e..9fcb6410a584 100644
--- a/include/linux/i3c/device.h
+++ b/include/linux/i3c/device.h
@@ -27,7 +27,7 @@
* These are the standard error codes as defined by the I3C specification.
* When -EIO is returned by the i3c_device_do_priv_xfers() or
* i3c_device_send_hdr_cmds() one can check the error code in
- * &struct_i3c_priv_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
+ * &struct i3c_xfer.err or &struct i3c_hdr_cmd.err to get a better idea of
* what went wrong.
*
*/
@@ -39,20 +39,25 @@ enum i3c_error_code {
};
/**
- * enum i3c_hdr_mode - HDR mode ids
+ * enum i3c_xfer_mode - I3C xfer mode ids
* @I3C_HDR_DDR: DDR mode
* @I3C_HDR_TSP: TSP mode
* @I3C_HDR_TSL: TSL mode
+ * @I3C_SDR: SDR mode (NOT HDR mode)
*/
-enum i3c_hdr_mode {
- I3C_HDR_DDR,
- I3C_HDR_TSP,
- I3C_HDR_TSL,
+enum i3c_xfer_mode {
+	/* The 3 values below (I3C_HDR*) must match the GETCAP1 byte bit positions */
+ I3C_HDR_DDR = 0,
+ I3C_HDR_TSP = 1,
+ I3C_HDR_TSL = 2,
+ /* Use for default SDR transfer mode */
+ I3C_SDR = 31,
};
/**
- * struct i3c_priv_xfer - I3C SDR private transfer
+ * struct i3c_xfer - I3C data transfer
* @rnw: encodes the transfer direction. true for a read, false for a write
+ * @cmd: Read/Write command in HDR mode, read: 0x80 - 0xff, write: 0x00 - 0x7f
* @len: transfer length in bytes of the transfer
* @actual_len: actual length in bytes are transferred by the controller
* @data: input/output buffer
@@ -60,8 +65,11 @@ enum i3c_hdr_mode {
* @data.out: output buffer. Must point to a DMA-able buffer
* @err: I3C error code
*/
-struct i3c_priv_xfer {
- u8 rnw;
+struct i3c_xfer {
+ union {
+ u8 rnw;
+ u8 cmd;
+ };
u16 len;
u16 actual_len;
union {
@@ -71,6 +79,9 @@ struct i3c_priv_xfer {
enum i3c_error_code err;
};
+/* keep back compatible */
+#define i3c_priv_xfer i3c_xfer
+
/**
* enum i3c_dcr - I3C DCR values
* @I3C_DCR_GENERIC_DEVICE: generic I3C device
@@ -297,9 +308,15 @@ static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv,
i3c_i2c_driver_unregister, \
__i2cdrv)
-int i3c_device_do_priv_xfers(struct i3c_device *dev,
- struct i3c_priv_xfer *xfers,
- int nxfers);
+int i3c_device_do_xfers(struct i3c_device *dev, struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
+
+static inline int i3c_device_do_priv_xfers(struct i3c_device *dev,
+ struct i3c_xfer *xfers,
+ int nxfers)
+{
+ return i3c_device_do_xfers(dev, xfers, nxfers, I3C_SDR);
+}
int i3c_device_do_setdasa(struct i3c_device *dev);
@@ -341,5 +358,6 @@ int i3c_device_request_ibi(struct i3c_device *dev,
void i3c_device_free_ibi(struct i3c_device *dev);
int i3c_device_enable_ibi(struct i3c_device *dev);
int i3c_device_disable_ibi(struct i3c_device *dev);
+u32 i3c_device_get_supported_xfer_mode(struct i3c_device *dev);
#endif /* I3C_DEV_H */
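A hedged client-side sketch of the new transfer API (hypothetical; it assumes i3c_device_get_supported_xfer_mode() returns a bitmask whose bit positions follow enum i3c_xfer_mode, as the GETCAP1 comment suggests):

#include <linux/bits.h>
#include <linux/errno.h>
#include <linux/i3c/device.h>

static int demo_hdr_ddr_read(struct i3c_device *dev, void *buf, u16 len)
{
	struct i3c_xfer xfer = {
		.cmd = 0x80,		/* HDR read commands are 0x80 - 0xff */
		.len = len,
		.data.in = buf,
	};

	if (!(i3c_device_get_supported_xfer_mode(dev) & BIT(I3C_HDR_DDR)))
		return -EOPNOTSUPP;

	return i3c_device_do_xfers(dev, &xfer, 1, I3C_HDR_DDR);
}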
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index c52a82dd79a6..2fd850f4678b 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -418,7 +418,11 @@ struct i3c_bus {
* @send_ccc_cmd: send a CCC command
* This method is mandatory.
* @priv_xfers: do one or several private I3C SDR transfers
- * This method is mandatory.
+ * This method is mandatory when i3c_xfers is not implemented. It
+ * is deprecated.
+ * @i3c_xfers: do one or several I3C SDR or HDR transfers
+ * This method is mandatory when priv_xfers is not implemented but
+ * should be implemented instead of priv_xfers.
* @attach_i2c_dev: called every time an I2C device is attached to the bus.
* This is a good place to attach master controller specific
* data to I2C devices.
@@ -474,9 +478,13 @@ struct i3c_master_controller_ops {
const struct i3c_ccc_cmd *cmd);
int (*send_ccc_cmd)(struct i3c_master_controller *master,
struct i3c_ccc_cmd *cmd);
+ /* Deprecated, please use i3c_xfers() */
int (*priv_xfers)(struct i3c_dev_desc *dev,
struct i3c_priv_xfer *xfers,
int nxfers);
+ int (*i3c_xfers)(struct i3c_dev_desc *dev,
+ struct i3c_xfer *xfers,
+ int nxfers, enum i3c_xfer_mode mode);
int (*attach_i2c_dev)(struct i2c_dev_desc *dev);
void (*detach_i2c_dev)(struct i2c_dev_desc *dev);
int (*i2c_xfers)(struct i2c_dev_desc *dev,
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 13add0c2c407..2af0d01ebb39 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -53,6 +53,15 @@ enum phy_media {
PHY_MEDIA_DAC,
};
+enum phy_ufs_state {
+ PHY_UFS_HIBERN8_ENTER,
+ PHY_UFS_HIBERN8_EXIT,
+};
+
+union phy_notify {
+ enum phy_ufs_state ufs_state;
+};
+
/**
* union phy_configure_opts - Opaque generic phy configuration
*
@@ -83,6 +92,7 @@ union phy_configure_opts {
* @set_speed: set the speed of the phy (optional)
* @reset: resetting the phy
* @calibrate: calibrate the phy
+ * @notify_phystate: notify and configure the phy for a particular state
* @release: ops to be performed while the consumer relinquishes the PHY
* @owner: the module owner containing the ops
*/
@@ -132,6 +142,7 @@ struct phy_ops {
int (*connect)(struct phy *phy, int port);
int (*disconnect)(struct phy *phy, int port);
+ int (*notify_phystate)(struct phy *phy, union phy_notify state);
void (*release)(struct phy *phy);
struct module *owner;
};
@@ -255,6 +266,7 @@ int phy_reset(struct phy *phy);
int phy_calibrate(struct phy *phy);
int phy_notify_connect(struct phy *phy, int port);
int phy_notify_disconnect(struct phy *phy, int port);
+int phy_notify_state(struct phy *phy, union phy_notify state);
static inline int phy_get_bus_width(struct phy *phy)
{
return phy->attrs.bus_width;
@@ -412,6 +424,13 @@ static inline int phy_notify_disconnect(struct phy *phy, int index)
return -ENOSYS;
}
+static inline int phy_notify_state(struct phy *phy, union phy_notify state)
+{
+ if (!phy)
+ return 0;
+ return -ENOSYS;
+}
+
static inline int phy_configure(struct phy *phy,
union phy_configure_opts *opts)
{
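A minimal consumer-side sketch of the new notifier (hypothetical, not part of this diff), using the UFS Hibern8 states added above:

static int demo_ufs_hibern8_notify(struct phy *phy, bool enter)
{
	union phy_notify state = {
		.ufs_state = enter ? PHY_UFS_HIBERN8_ENTER : PHY_UFS_HIBERN8_EXIT,
	};

	return phy_notify_state(phy, state);
}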
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index d9245ecec71d..1be4032071c2 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -112,6 +112,12 @@ struct pinctrl_map;
* or latch delay (on outputs) this parameter (in a custom format)
* specifies the clock skew or latch delay. It typically controls how
* many double inverters are put in front of the line.
+ * @PIN_CONFIG_SKEW_DELAY_INPUT_PS: if the pin has independent values for the
+ * programmable skew rate (on inputs) and latch delay (on outputs), then
+ * this parameter specifies the clock skew only. The argument is in ps.
+ * @PIN_CONFIG_SKEW_DELAY_OUTPUT_PS: if the pin has independent values for the
+ * programmable skew rate (on inputs) and latch delay (on outputs), then
+ * this parameter specifies the latch delay only. The argument is in ps.
* @PIN_CONFIG_SLEEP_HARDWARE_STATE: indicate this is sleep related state.
* @PIN_CONFIG_SLEW_RATE: if the pin can select slew rate, the argument to
* this parameter (on a custom format) tells the driver which alternative
@@ -147,6 +153,8 @@ enum pin_config_param {
PIN_CONFIG_PERSIST_STATE,
PIN_CONFIG_POWER_SOURCE,
PIN_CONFIG_SKEW_DELAY,
+ PIN_CONFIG_SKEW_DELAY_INPUT_PS,
+ PIN_CONFIG_SKEW_DELAY_OUTPUT_PS,
PIN_CONFIG_SLEEP_HARDWARE_STATE,
PIN_CONFIG_SLEW_RATE,
PIN_CONFIG_END = 0x7F,
@@ -181,21 +189,28 @@ static inline unsigned long pinconf_to_config_packed(enum pin_config_param param
return PIN_CONF_PACKED(param, argument);
}
-#define PCONFDUMP(a, b, c, d) { \
- .param = a, .display = b, .format = c, .has_arg = d \
+#define PCONFDUMP_WITH_VALUES(a, b, c, d, e, f) { \
+ .param = a, .display = b, .format = c, .has_arg = d, \
+ .values = e, .num_values = f \
}
+#define PCONFDUMP(a, b, c, d) PCONFDUMP_WITH_VALUES(a, b, c, d, NULL, 0)
+
struct pin_config_item {
const enum pin_config_param param;
const char * const display;
const char * const format;
bool has_arg;
+ const char * const *values;
+ size_t num_values;
};
struct pinconf_generic_params {
const char * const property;
enum pin_config_param param;
u32 default_value;
+ const char * const *values;
+ size_t num_values;
};
int pinconf_generic_dt_subnode_to_map(struct pinctrl_dev *pctldev,
diff --git a/include/linux/pinctrl/pinmux.h b/include/linux/pinctrl/pinmux.h
index 6db6c3e1ccc2..094bbe2fd6fd 100644
--- a/include/linux/pinctrl/pinmux.h
+++ b/include/linux/pinctrl/pinmux.h
@@ -35,6 +35,16 @@ struct pinctrl_gpio_range;
* name can be used with the generic @pinctrl_ops to retrieve the
* actual pins affected. The applicable groups will be returned in
* @groups and the number of groups in @num_groups
+ * @function_is_gpio: determine if the indicated function selector passed
+ * corresponds to the GPIO function which is used by the accelerated GPIO
+ * functions @gpio_request_enable, @gpio_disable_free and
+ * @gpio_set_direction. When the pin control core can properly determine
+ * if a function is a GPIO function, it is easier to use the @strict mode
+ * on the pin controller. Since a single function is passed, this is
+ * only useful on pin controllers that use a specific function for GPIO,
+ * and that usually presupposes that a one-group-per-pin approach is
+ * used, so that a single function can be set on a single pin to turn
+ * it to GPIO mode.
* @set_mux: enable a certain muxing function with a certain pin group. The
* driver does not need to figure out whether enabling this function
* conflicts some other use of the pins in that group, such collisions
diff --git a/include/linux/static_call_types.h b/include/linux/static_call_types.h
index 5a00b8b2cf9f..cfb6ddeb292b 100644
--- a/include/linux/static_call_types.h
+++ b/include/linux/static_call_types.h
@@ -25,6 +25,8 @@
#define STATIC_CALL_SITE_INIT 2UL /* init section */
#define STATIC_CALL_SITE_FLAGS 3UL
+#ifndef __ASSEMBLY__
+
/*
* The static call site table needs to be created by external tooling (objtool
* or a compiler plugin).
@@ -100,4 +102,6 @@ struct static_call_key {
#endif /* CONFIG_HAVE_STATIC_CALL */
+#endif /* __ASSEMBLY__ */
+
#endif /* _STATIC_CALL_TYPES_H */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index edbbd869078f..df4017dcc701 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -50,6 +50,9 @@ TRACE_DEFINE_ENUM(CP_PAUSE);
TRACE_DEFINE_ENUM(CP_RESIZE);
TRACE_DEFINE_ENUM(EX_READ);
TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
+TRACE_DEFINE_ENUM(CP_PHASE_START_BLOCK_OPS);
+TRACE_DEFINE_ENUM(CP_PHASE_FINISH_BLOCK_OPS);
+TRACE_DEFINE_ENUM(CP_PHASE_FINISH_CHECKPOINT);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -175,6 +178,12 @@ TRACE_DEFINE_ENUM(EX_BLOCK_AGE);
#define S_ALL_PERM (S_ISUID | S_ISGID | S_ISVTX | \
S_IRWXU | S_IRWXG | S_IRWXO)
+#define show_cp_phase(phase) \
+ __print_symbolic(phase, \
+ { CP_PHASE_START_BLOCK_OPS, "start block_ops" }, \
+ { CP_PHASE_FINISH_BLOCK_OPS, "finish block_ops" }, \
+ { CP_PHASE_FINISH_CHECKPOINT, "finish checkpoint" })
+
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -204,7 +213,7 @@ DECLARE_EVENT_CLASS(f2fs__inode,
__entry->pino = F2FS_I(inode)->i_pino;
__entry->mode = inode->i_mode;
__entry->nlink = inode->i_nlink;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->advise = F2FS_I(inode)->i_advise;
),
@@ -353,7 +362,7 @@ TRACE_EVENT(f2fs_unlink_enter,
TP_fast_assign(
__entry->dev = dir->i_sb->s_dev;
__entry->ino = dir->i_ino;
- __entry->size = dir->i_size;
+ __entry->size = i_size_read(dir);
__entry->blocks = dir->i_blocks;
__assign_str(name);
),
@@ -433,7 +442,7 @@ DECLARE_EVENT_CLASS(f2fs__truncate_op,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->from = from;
),
@@ -586,6 +595,38 @@ TRACE_EVENT(f2fs_file_write_iter,
__entry->ret)
);
+TRACE_EVENT(f2fs_fadvise,
+
+ TP_PROTO(struct inode *inode, loff_t offset, loff_t len, int advice),
+
+ TP_ARGS(inode, offset, len, advice),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(loff_t, size)
+ __field(loff_t, offset)
+ __field(loff_t, len)
+ __field(int, advice)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->size = i_size_read(inode);
+ __entry->offset = offset;
+ __entry->len = len;
+ __entry->advice = advice;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, i_size = %lld offset:%llu, len:%llu, advise:%d",
+ show_dev_ino(__entry),
+ (unsigned long long)__entry->size,
+ __entry->offset,
+ __entry->len,
+ __entry->advice)
+);
+
TRACE_EVENT(f2fs_map_blocks,
TP_PROTO(struct inode *inode, struct f2fs_map_blocks *map, int flag,
int ret),
@@ -1006,7 +1047,7 @@ TRACE_EVENT(f2fs_fallocate,
__entry->mode = mode;
__entry->offset = offset;
__entry->len = len;
- __entry->size = inode->i_size;
+ __entry->size = i_size_read(inode);
__entry->blocks = inode->i_blocks;
__entry->ret = ret;
),
@@ -1541,26 +1582,26 @@ TRACE_EVENT(f2fs_readpages,
TRACE_EVENT(f2fs_write_checkpoint,
- TP_PROTO(struct super_block *sb, int reason, const char *msg),
+ TP_PROTO(struct super_block *sb, int reason, u16 phase),
- TP_ARGS(sb, reason, msg),
+ TP_ARGS(sb, reason, phase),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(int, reason)
- __string(dest_msg, msg)
+ __field(u16, phase)
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->reason = reason;
- __assign_str(dest_msg);
+ __entry->phase = phase;
),
TP_printk("dev = (%d,%d), checkpoint for %s, state = %s",
show_dev(__entry->dev),
show_cpreason(__entry->reason),
- __get_str(dest_msg))
+ show_cp_phase(__entry->phase))
);
DECLARE_EVENT_CLASS(f2fs_discard,
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
index 45d15460b495..34b31a855ea4 100644
--- a/include/trace/events/io_uring.h
+++ b/include/trace/events/io_uring.h
@@ -133,15 +133,15 @@ TRACE_EVENT(io_uring_file_get,
* io_uring_queue_async_work - called before submitting a new async work
*
* @req: pointer to a submitted request
- * @rw: type of workqueue, hashed or normal
+ * @hashed: whether async work is hashed
*
* Allows to trace asynchronous work submission.
*/
TRACE_EVENT(io_uring_queue_async_work,
- TP_PROTO(struct io_kiocb *req, int rw),
+ TP_PROTO(struct io_kiocb *req, bool hashed),
- TP_ARGS(req, rw),
+ TP_ARGS(req, hashed),
TP_STRUCT__entry (
__field( void *, ctx )
@@ -150,7 +150,7 @@ TRACE_EVENT(io_uring_queue_async_work,
__field( u8, opcode )
__field( unsigned long long, flags )
__field( struct io_wq_work *, work )
- __field( int, rw )
+ __field( bool, hashed )
__string( op_str, io_uring_get_opcode(req->opcode) )
),
@@ -162,7 +162,7 @@ TRACE_EVENT(io_uring_queue_async_work,
__entry->flags = (__force unsigned long long) req->flags;
__entry->opcode = req->opcode;
__entry->work = &req->work;
- __entry->rw = rw;
+ __entry->hashed = hashed;
__assign_str(op_str);
),
@@ -170,7 +170,7 @@ TRACE_EVENT(io_uring_queue_async_work,
TP_printk("ring %p, request %p, user_data 0x%llx, opcode %s, flags 0x%llx, %s queue, work %p",
__entry->ctx, __entry->req, __entry->user_data,
__get_str(op_str), __entry->flags,
- __entry->rw ? "hashed" : "normal", __entry->work)
+ __entry->hashed ? "hashed" : "normal", __entry->work)
);
/**
diff --git a/include/uapi/linux/mshv.h b/include/uapi/linux/mshv.h
index 876bfe4e4227..dee3ece28ce5 100644
--- a/include/uapi/linux/mshv.h
+++ b/include/uapi/linux/mshv.h
@@ -26,6 +26,7 @@ enum {
MSHV_PT_BIT_LAPIC,
MSHV_PT_BIT_X2APIC,
MSHV_PT_BIT_GPA_SUPER_PAGES,
+ MSHV_PT_BIT_CPU_AND_XSAVE_FEATURES,
MSHV_PT_BIT_COUNT,
};
@@ -41,6 +42,8 @@ enum {
* @pt_flags: Bitmask of 1 << MSHV_PT_BIT_*
* @pt_isolation: MSHV_PT_ISOLATION_*
*
+ * This is the initial/v1 version for backward compatibility.
+ *
* Returns a file descriptor to act as a handle to a guest partition.
* At this point the partition is not yet initialized in the hypervisor.
* Some operations must be done with the partition in this state, e.g. setting
@@ -52,6 +55,37 @@ struct mshv_create_partition {
__u64 pt_isolation;
};
+#define MSHV_NUM_CPU_FEATURES_BANKS 2
+
+/**
+ * struct mshv_create_partition_v2
+ *
+ * This is an extended version of the initial MSHV_CREATE_PARTITION ioctl
+ * above and allows for the following additional parameters:
+ *
+ * @pt_num_cpu_fbanks: Must be set to MSHV_NUM_CPU_FEATURES_BANKS.
+ * @pt_cpu_fbanks: Disabled processor feature banks array.
+ * @pt_disabled_xsave: Disabled xsave feature bits.
+ *
+ * pt_cpu_fbanks and pt_disabled_xsave are passed through as-is to the create
+ * partition hypercall.
+ *
+ * Returns: same as the original mshv_create_partition above
+ */
+struct mshv_create_partition_v2 {
+ __u64 pt_flags;
+ __u64 pt_isolation;
+ __u16 pt_num_cpu_fbanks;
+ __u8 pt_rsvd[6]; /* MBZ */
+ __u64 pt_cpu_fbanks[MSHV_NUM_CPU_FEATURES_BANKS];
+ __u64 pt_rsvd1[2]; /* MBZ */
+#if defined(__x86_64__)
+ __u64 pt_disabled_xsave;
+#else
+ __u64 pt_rsvd2; /* MBZ */
+#endif
+} __packed;
+
/* /dev/mshv */
#define MSHV_CREATE_PARTITION _IOW(MSHV_IOCTL, 0x00, struct mshv_create_partition)
@@ -89,7 +123,7 @@ enum {
* @rsvd: MBZ
*
* Map or unmap a region of userspace memory to Guest Physical Addresses (GPA).
- * Mappings can't overlap in GPA space or userspace.
+ * Mappings can't overlap in GPA space.
* To unmap, these fields must match an existing mapping.
*/
struct mshv_user_mem_region {
@@ -288,4 +322,84 @@ struct mshv_get_set_vp_state {
* #define MSHV_ROOT_HVCALL _IOWR(MSHV_IOCTL, 0x07, struct mshv_root_hvcall)
*/
+/* Structure definitions, macros and IOCTLs for mshv_vtl */
+
+#define MSHV_CAP_CORE_API_STABLE 0x0
+#define MSHV_CAP_REGISTER_PAGE 0x1
+#define MSHV_CAP_VTL_RETURN_ACTION 0x2
+#define MSHV_CAP_DR6_SHARED 0x3
+#define MSHV_MAX_RUN_MSG_SIZE 256
+
+struct mshv_vp_registers {
+ __u32 count; /* supports only 1 register at a time */
+ __u32 reserved; /* Reserved for alignment or future use */
+ __u64 regs_ptr; /* pointer to struct hv_register_assoc */
+};
+
+struct mshv_vtl_set_eventfd {
+ __s32 fd;
+ __u32 flag;
+};
+
+struct mshv_vtl_signal_event {
+ __u32 connection_id;
+ __u32 flag;
+};
+
+struct mshv_vtl_sint_post_msg {
+ __u64 message_type;
+ __u32 connection_id;
+ __u32 payload_size; /* Must not exceed HV_MESSAGE_PAYLOAD_BYTE_COUNT */
+ __u64 payload_ptr; /* pointer to message payload (bytes) */
+};
+
+struct mshv_vtl_ram_disposition {
+ __u64 start_pfn;
+ __u64 last_pfn;
+};
+
+struct mshv_vtl_set_poll_file {
+ __u32 cpu;
+ __u32 fd;
+};
+
+struct mshv_vtl_hvcall_setup {
+ __u64 bitmap_array_size; /* stores number of bytes */
+ __u64 allow_bitmap_ptr;
+};
+
+struct mshv_vtl_hvcall {
+ __u64 control; /* Hypercall control code */
+ __u64 input_size; /* Size of the input data */
+ __u64 input_ptr; /* Pointer to the input struct */
+ __u64 status; /* Status of the hypercall (output) */
+ __u64 output_size; /* Size of the output data */
+ __u64 output_ptr; /* Pointer to the output struct */
+};
+
+struct mshv_sint_mask {
+ __u8 mask;
+ __u8 reserved[7];
+};
+
+/* /dev/mshv device IOCTL */
+#define MSHV_CHECK_EXTENSION _IOW(MSHV_IOCTL, 0x00, __u32)
+
+/* vtl device */
+#define MSHV_CREATE_VTL _IOR(MSHV_IOCTL, 0x1D, char)
+#define MSHV_ADD_VTL0_MEMORY _IOW(MSHV_IOCTL, 0x21, struct mshv_vtl_ram_disposition)
+#define MSHV_SET_POLL_FILE _IOW(MSHV_IOCTL, 0x25, struct mshv_vtl_set_poll_file)
+#define MSHV_RETURN_TO_LOWER_VTL _IO(MSHV_IOCTL, 0x27)
+#define MSHV_GET_VP_REGISTERS _IOWR(MSHV_IOCTL, 0x05, struct mshv_vp_registers)
+#define MSHV_SET_VP_REGISTERS _IOW(MSHV_IOCTL, 0x06, struct mshv_vp_registers)
+
+/* VMBus device IOCTLs */
+#define MSHV_SINT_SIGNAL_EVENT _IOW(MSHV_IOCTL, 0x22, struct mshv_vtl_signal_event)
+#define MSHV_SINT_POST_MESSAGE _IOW(MSHV_IOCTL, 0x23, struct mshv_vtl_sint_post_msg)
+#define MSHV_SINT_SET_EVENTFD _IOW(MSHV_IOCTL, 0x24, struct mshv_vtl_set_eventfd)
+#define MSHV_SINT_PAUSE_MESSAGE_STREAM _IOW(MSHV_IOCTL, 0x25, struct mshv_sint_mask)
+
+/* hv_hvcall device */
+#define MSHV_HVCALL_SETUP _IOW(MSHV_IOCTL, 0x1E, struct mshv_vtl_hvcall_setup)
+#define MSHV_HVCALL _IOWR(MSHV_IOCTL, 0x1F, struct mshv_vtl_hvcall)
#endif
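A hedged user-space sketch of the new hypercall pass-through ioctls (hypothetical; the device node path and the hypercall control code are placeholders, not defined by this header):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/mshv.h>

static int demo_hvcall(const char *dev_path, uint64_t control,
		       void *in, uint64_t in_size, void *out, uint64_t out_size)
{
	struct mshv_vtl_hvcall args = {
		.control = control,
		.input_size = in_size,
		.input_ptr = (uint64_t)(uintptr_t)in,
		.output_size = out_size,
		.output_ptr = (uint64_t)(uintptr_t)out,
	};
	int fd, ret;

	fd = open(dev_path, O_RDWR);
	if (fd < 0)
		return -1;

	ret = ioctl(fd, MSHV_HVCALL, &args);
	close(fd);

	/* On success, args.status carries the Hyper-V status code. */
	return ret ? ret : (int)args.status;
}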
diff --git a/include/uapi/linux/pr.h b/include/uapi/linux/pr.h
index d8126415966f..847f3051057a 100644
--- a/include/uapi/linux/pr.h
+++ b/include/uapi/linux/pr.h
@@ -56,6 +56,18 @@ struct pr_clear {
__u32 __pad;
};
+struct pr_read_keys {
+ __u32 generation;
+ __u32 num_keys;
+ __u64 keys_ptr;
+};
+
+struct pr_read_reservation {
+ __u64 key;
+ __u32 generation;
+ __u32 type;
+};
+
#define PR_FL_IGNORE_KEY (1 << 0) /* ignore existing key */
#define IOC_PR_REGISTER _IOW('p', 200, struct pr_registration)
@@ -64,5 +76,7 @@ struct pr_clear {
#define IOC_PR_PREEMPT _IOW('p', 203, struct pr_preempt)
#define IOC_PR_PREEMPT_ABORT _IOW('p', 204, struct pr_preempt)
#define IOC_PR_CLEAR _IOW('p', 205, struct pr_clear)
+#define IOC_PR_READ_KEYS _IOWR('p', 206, struct pr_read_keys)
+#define IOC_PR_READ_RESERVATION _IOR('p', 207, struct pr_read_reservation)
#endif /* _UAPI_PR_H */
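A hedged user-space sketch of the new persistent-reservation read ioctl (hypothetical; it assumes num_keys is set by the caller to the capacity of the buffer behind keys_ptr and updated by the kernel to the number of registered keys):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/pr.h>

int main(int argc, char **argv)
{
	uint64_t keys[16];
	struct pr_read_keys rk = {
		.num_keys = 16,
		.keys_ptr = (uint64_t)(uintptr_t)keys,
	};
	unsigned int i;
	int fd;

	if (argc < 2)
		return 1;

	fd = open(argv[1], O_RDWR);
	if (fd < 0 || ioctl(fd, IOC_PR_READ_KEYS, &rk))
		return 1;

	for (i = 0; i < rk.num_keys && i < 16; i++)
		printf("key[%u] = 0x%llx\n", i, (unsigned long long)keys[i]);

	close(fd);
	return 0;
}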