Diffstat (limited to 'include/linux'): 274 files changed, 6213 insertions, 3342 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 0687a442fec7..1655c4c23a78 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -107,6 +107,7 @@ enum acpi_irq_model_id { ACPI_IRQ_MODEL_PLATFORM, ACPI_IRQ_MODEL_GIC, ACPI_IRQ_MODEL_LPIC, + ACPI_IRQ_MODEL_RINTC, ACPI_IRQ_MODEL_COUNT }; @@ -386,7 +387,7 @@ extern bool acpi_is_pnp_device(struct acpi_device *); #if defined(CONFIG_ACPI_WMI) || defined(CONFIG_ACPI_WMI_MODULE) -typedef void (*wmi_notify_handler) (u32 value, void *context); +typedef void (*wmi_notify_handler) (union acpi_object *data, void *context); int wmi_instance_count(const char *guid); @@ -401,7 +402,6 @@ extern acpi_status wmi_set_block(const char *guid, u8 instance, extern acpi_status wmi_install_notify_handler(const char *guid, wmi_notify_handler handler, void *data); extern acpi_status wmi_remove_notify_handler(const char *guid); -extern acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out); extern bool wmi_has_guid(const char *guid); extern char *wmi_get_acpi_device_uid(const char *guid); @@ -1343,6 +1343,8 @@ struct acpi_probe_entry { kernel_ulong_t driver_data; }; +void arch_sort_irqchip_probe(struct acpi_probe_entry *ap_head, int nr); + #define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, \ valid, data, fn) \ static const struct acpi_probe_entry __acpi_probe_##name \ @@ -1529,6 +1531,12 @@ void acpi_arm_init(void); static inline void acpi_arm_init(void) { } #endif +#ifdef CONFIG_RISCV +void acpi_riscv_init(void); +#else +static inline void acpi_riscv_init(void) { } +#endif + #ifdef CONFIG_ACPI_PCC void acpi_init_pcc(void); #else diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h index 50d88bf1498d..0ded9220d379 100644 --- a/include/linux/acpi_pmtmr.h +++ b/include/linux/acpi_pmtmr.h @@ -26,6 +26,19 @@ static inline u32 acpi_pm_read_early(void) return acpi_pm_read_verified() & ACPI_PM_MASK; } +/** + * Register callback for suspend and resume event + * + * @cb Callback triggered on suspend and resume + * @data Data passed with the callback + */ +void acpi_pmtmr_register_suspend_resume_callback(void (*cb)(void *data, bool suspend), void *data); + +/** + * Remove registered callback for suspend and resume event + */ +void acpi_pmtmr_unregister_suspend_resume_callback(void); + #else static inline u32 acpi_pm_read_early(void) diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 8c61ccd161ba..1f0a9ff23a2c 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -70,7 +70,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct) /* * When percpu variables are required to be defined as weak, static percpu * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION). - * Instead we will accound all module allocations to a single counter. + * Instead we will account all module allocations to a single counter. 
*/ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); @@ -137,7 +137,16 @@ static inline void alloc_tag_sub_check(union codetag_ref *ref) {} /* Caller should verify both ref and tag to be valid */ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) { + alloc_tag_add_check(ref, tag); + if (!ref || !tag) + return; + ref->ct = &tag->ct; +} + +static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) +{ + __alloc_tag_ref_set(ref, tag); /* * We need in increment the call counter every time we have a new * allocation or when we split a large allocation into smaller ones. @@ -147,22 +156,9 @@ static inline void __alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag this_cpu_inc(tag->counters->calls); } -static inline void alloc_tag_ref_set(union codetag_ref *ref, struct alloc_tag *tag) -{ - alloc_tag_add_check(ref, tag); - if (!ref || !tag) - return; - - __alloc_tag_ref_set(ref, tag); -} - static inline void alloc_tag_add(union codetag_ref *ref, struct alloc_tag *tag, size_t bytes) { - alloc_tag_add_check(ref, tag); - if (!ref || !tag) - return; - - __alloc_tag_ref_set(ref, tag); + alloc_tag_ref_set(ref, tag); this_cpu_add(tag->counters->bytes, bytes); } diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index 958a55bcc708..dda2f3ea89cb 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h @@ -105,7 +105,7 @@ enum amba_vendor { AMBA_VENDOR_LSI = 0xb6, }; -extern struct bus_type amba_bustype; +extern const struct bus_type amba_bustype; #define to_amba_device(d) container_of_const(d, struct amba_device, dev) diff --git a/include/linux/args.h b/include/linux/args.h index 8ff60a54eb7d..2e8e65d975c7 100644 --- a/include/linux/args.h +++ b/include/linux/args.h @@ -17,9 +17,9 @@ * that as _n. */ -/* This counts to 12. Any more, it will return 13th argument. */ -#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n -#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) +/* This counts to 15. Any more, it will return 16th argument. */ +#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, _15, _n, X...) _n +#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0) /* Concatenate two parameters, but allow them to be expanded beforehand. 
*/ #define __CONCAT(a, b) a ## b diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 083f85653716..f59099a213d0 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h @@ -115,6 +115,70 @@ /* KVM "vendor specific" services */ #define ARM_SMCCC_KVM_FUNC_FEATURES 0 #define ARM_SMCCC_KVM_FUNC_PTP 1 +/* Start of pKVM hypercall range */ +#define ARM_SMCCC_KVM_FUNC_HYP_MEMINFO 2 +#define ARM_SMCCC_KVM_FUNC_MEM_SHARE 3 +#define ARM_SMCCC_KVM_FUNC_MEM_UNSHARE 4 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_5 5 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_6 6 +#define ARM_SMCCC_KVM_FUNC_MMIO_GUARD 7 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_8 8 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_9 9 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_10 10 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_11 11 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_12 12 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_13 13 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_14 14 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_15 15 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_16 16 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_17 17 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_18 18 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_19 19 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_20 20 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_21 21 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_22 22 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_23 23 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_24 24 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_25 25 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_26 26 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_27 27 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_28 28 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_29 29 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_30 30 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_31 31 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_32 32 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_33 33 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_34 34 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_35 35 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_36 36 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_37 37 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_38 38 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_39 39 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_40 40 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_41 41 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_42 42 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_43 43 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_44 44 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_45 45 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_46 46 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_47 47 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_48 48 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_49 49 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_50 50 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_51 51 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_52 52 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_53 53 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_54 54 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_55 55 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_56 56 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_57 57 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_58 58 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_59 59 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_60 60 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_61 61 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_62 62 +#define ARM_SMCCC_KVM_FUNC_PKVM_RESV_63 63 +/* End of pKVM hypercall range */ #define ARM_SMCCC_KVM_FUNC_FEATURES_2 127 #define ARM_SMCCC_KVM_NUM_FUNCS 128 @@ -137,6 +201,30 @@ ARM_SMCCC_OWNER_VENDOR_HYP, \ ARM_SMCCC_KVM_FUNC_PTP) +#define ARM_SMCCC_VENDOR_HYP_KVM_HYP_MEMINFO_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_HYP_MEMINFO) + +#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_SHARE_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ 
+ ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_MEM_SHARE) + +#define ARM_SMCCC_VENDOR_HYP_KVM_MEM_UNSHARE_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_MEM_UNSHARE) + +#define ARM_SMCCC_VENDOR_HYP_KVM_MMIO_GUARD_FUNC_ID \ + ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ + ARM_SMCCC_SMC_64, \ + ARM_SMCCC_OWNER_VENDOR_HYP, \ + ARM_SMCCC_KVM_FUNC_MMIO_GUARD) + /* ptp_kvm counter type ID */ #define KVM_PTP_VIRT_COUNTER 0 #define KVM_PTP_PHYS_COUNTER 1 diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h index 89683f31ae12..a28e2a6a13d0 100644 --- a/include/linux/arm_ffa.h +++ b/include/linux/arm_ffa.h @@ -73,6 +73,11 @@ #define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88) #define FFA_MEM_PERM_SET FFA_SMC_32(0x89) #define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89) +#define FFA_CONSOLE_LOG FFA_SMC_32(0x8A) +#define FFA_PARTITION_INFO_GET_REGS FFA_SMC_64(0x8B) +#define FFA_EL3_INTR_HANDLE FFA_SMC_32(0x8C) +#define FFA_MSG_SEND_DIRECT_REQ2 FFA_SMC_64(0x8D) +#define FFA_MSG_SEND_DIRECT_RESP2 FFA_SMC_64(0x8E) /* * For some calls it is necessary to use SMC64 to pass or return 64-bit values. @@ -265,6 +270,11 @@ struct ffa_indirect_msg_hdr { u32 size; }; +/* For use with FFA_MSG_SEND_DIRECT_{REQ,RESP}2 which pass data via registers */ +struct ffa_send_direct_data2 { + unsigned long data[14]; /* x4-x17 */ +}; + struct ffa_mem_region_addr_range { /* The base IPA of the constituent memory region, aligned to 4 kiB */ u64 address; @@ -426,6 +436,8 @@ struct ffa_msg_ops { int (*sync_send_receive)(struct ffa_device *dev, struct ffa_send_direct_data *data); int (*indirect_send)(struct ffa_device *dev, void *buf, size_t sz); + int (*sync_send_receive2)(struct ffa_device *dev, const uuid_t *uuid, + struct ffa_send_direct_data2 *data); }; struct ffa_mem_ops { diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h index 8e177b67e82f..f41395264dca 100644 --- a/include/linux/avf/virtchnl.h +++ b/include/linux/avf/virtchnl.h @@ -247,6 +247,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource); /* used to negotiate communicating link speeds in Mbps */ #define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7) #define VIRTCHNL_VF_OFFLOAD_CRC BIT(10) +#define VIRTCHNL_VF_OFFLOAD_TC_U32 BIT(11) #define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15) #define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16) #define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17) @@ -1121,6 +1122,7 @@ enum virtchnl_vfr_states { }; #define VIRTCHNL_MAX_NUM_PROTO_HDRS 32 +#define VIRTCHNL_MAX_SIZE_RAW_PACKET 1024 #define PROTO_HDR_SHIFT 5 #define PROTO_HDR_FIELD_START(proto_hdr_type) ((proto_hdr_type) << PROTO_HDR_SHIFT) #define PROTO_HDR_FIELD_MASK ((1UL << PROTO_HDR_SHIFT) - 1) @@ -1266,13 +1268,22 @@ struct virtchnl_proto_hdrs { u8 pad[3]; /** * specify where protocol header start from. + * must be 0 when sending a raw packet request. * 0 - from the outer layer * 1 - from the first inner layer * 2 - from the second inner layer * .... 
**/ int count; /* the proto layers must < VIRTCHNL_MAX_NUM_PROTO_HDRS */ - struct virtchnl_proto_hdr proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; + union { + struct virtchnl_proto_hdr + proto_hdr[VIRTCHNL_MAX_NUM_PROTO_HDRS]; + struct { + u16 pkt_len; + u8 spec[VIRTCHNL_MAX_SIZE_RAW_PACKET]; + u8 mask[VIRTCHNL_MAX_SIZE_RAW_PACKET]; + } raw; + }; }; VIRTCHNL_CHECK_STRUCT_LEN(2312, virtchnl_proto_hdrs); diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h index 68da8dba5162..dba41b65ae0d 100644 --- a/include/linux/bcma/bcma_driver_pci.h +++ b/include/linux/bcma/bcma_driver_pci.h @@ -203,7 +203,7 @@ struct pci_dev; #define BCMA_CORE_PCI_MDIO_RXCTRL0 0x840 /* PCIE Root Capability Register bits (Host mode only) */ -#define BCMA_CORE_PCI_RC_CRS_VISIBILITY 0x0001 +#define BCMA_CORE_PCI_RC_RRS_VISIBILITY 0x0001 struct bcma_drv_pci; struct bcma_bus; diff --git a/include/linux/bio.h b/include/linux/bio.h index a46e2047bea4..faceadb040f9 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h @@ -324,8 +324,8 @@ static inline void bio_next_folio(struct folio_iter *fi, struct bio *bio) void bio_trim(struct bio *bio, sector_t offset, sector_t size); extern struct bio *bio_split(struct bio *bio, int sectors, gfp_t gfp, struct bio_set *bs); -struct bio *bio_split_rw(struct bio *bio, const struct queue_limits *lim, - unsigned *segs, struct bio_set *bs, unsigned max_bytes); +int bio_split_rw_at(struct bio *bio, const struct queue_limits *lim, + unsigned *segs, unsigned max_bytes); /** * bio_next_split - get next @sectors from a bio, splitting if necessary diff --git a/include/linux/blk-integrity.h b/include/linux/blk-integrity.h index de98049b7ded..676f8f860c47 100644 --- a/include/linux/blk-integrity.h +++ b/include/linux/blk-integrity.h @@ -25,9 +25,10 @@ static inline bool queue_limits_stack_integrity_bdev(struct queue_limits *t, } #ifdef CONFIG_BLK_DEV_INTEGRITY -int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, - struct scatterlist *); +int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); +int blk_rq_integrity_map_user(struct request *rq, void __user *ubuf, + ssize_t bytes, u32 seed); static inline bool blk_integrity_queue_supports_integrity(struct request_queue *q) @@ -96,12 +97,18 @@ static inline int blk_rq_count_integrity_sg(struct request_queue *q, { return 0; } -static inline int blk_rq_map_integrity_sg(struct request_queue *q, - struct bio *b, +static inline int blk_rq_map_integrity_sg(struct request *q, struct scatterlist *s) { return 0; } +static inline int blk_rq_integrity_map_user(struct request *rq, + void __user *ubuf, + ssize_t bytes, + u32 seed) +{ + return -EINVAL; +} static inline struct blk_integrity *bdev_get_integrity(struct block_device *b) { return NULL; diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8d304b1d16b1..4fecf46ef681 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -149,10 +149,7 @@ struct request { * physical address coalescing is performed. 
*/ unsigned short nr_phys_segments; - -#ifdef CONFIG_BLK_DEV_INTEGRITY unsigned short nr_integrity_segments; -#endif #ifdef CONFIG_BLK_INLINE_ENCRYPTION struct bio_crypt_ctx *crypt_ctx; diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 36ed96133217..dce7615c35e7 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -71,6 +71,9 @@ struct block_device { struct partition_meta_info *bd_meta_info; int bd_writers; +#ifdef CONFIG_SECURITY + void *bd_security; +#endif /* * keep this out-of-line as it's both big and not needed in the fast * path @@ -248,11 +251,9 @@ struct bio { struct bio_crypt_ctx *bi_crypt_context; #endif - union { #if defined(CONFIG_BLK_DEV_INTEGRITY) - struct bio_integrity_payload *bi_integrity; /* data integrity */ + struct bio_integrity_payload *bi_integrity; /* data integrity */ #endif - }; unsigned short bi_vcnt; /* how many bio_vec's */ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index b7664d593486..50c3b959da28 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -968,8 +968,6 @@ static inline void blk_queue_disable_write_zeroes(struct request_queue *q) /* * Access functions for manipulating queue properties */ -extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); -extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt); extern void blk_set_queue_depth(struct request_queue *q, unsigned int depth); extern void blk_set_stacking_limits(struct queue_limits *lim); extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b, @@ -1187,7 +1185,8 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q) return q->limits.max_segment_size; } -static inline unsigned int queue_limits_max_zone_append_sectors(struct queue_limits *l) +static inline unsigned int +queue_limits_max_zone_append_sectors(const struct queue_limits *l) { unsigned int max_sectors = min(l->chunk_sectors, l->max_hw_sectors); diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 3b94ec161e8c..19d8ca8ac960 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -294,6 +294,7 @@ struct bpf_map { * same prog type, JITed flag and xdp_has_frags flag. */ struct { + const struct btf_type *attach_func_proto; spinlock_t lock; enum bpf_prog_type type; bool jited; @@ -694,6 +695,11 @@ enum bpf_type_flag { /* DYNPTR points to xdp_buff */ DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), + /* Memory must be aligned on some architectures, used in combination with + * MEM_FIXED_SIZE. + */ + MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), + __BPF_TYPE_FLAG_MAX, __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, }; @@ -731,8 +737,6 @@ enum bpf_arg_type { ARG_ANYTHING, /* any (initialized) argument is ok */ ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ - ARG_PTR_TO_INT, /* pointer to int */ - ARG_PTR_TO_LONG, /* pointer to long */ ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ @@ -743,7 +747,7 @@ enum bpf_arg_type { ARG_PTR_TO_STACK, /* pointer to stack */ ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ - ARG_PTR_TO_KPTR, /* pointer to referenced kptr */ + ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */ ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. 
See bpf_type_flag for dynptr type */ __BPF_ARG_TYPE_MAX, @@ -807,6 +811,12 @@ struct bpf_func_proto { bool gpl_only; bool pkt_access; bool might_sleep; + /* set to true if helper follows contract for llvm + * attribute bpf_fastcall: + * - void functions do not scratch r0 + * - functions taking N arguments scratch only registers r1-rN + */ + bool allow_fastcall; enum bpf_return_type ret_type; union { struct { @@ -919,6 +929,7 @@ static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); */ struct bpf_insn_access_aux { enum bpf_reg_type reg_type; + bool is_ldsx; union { int ctx_field_size; struct { @@ -927,6 +938,7 @@ struct bpf_insn_access_aux { }; }; struct bpf_verifier_log *log; /* for verbose logs */ + bool is_retval; /* is accessing function return value ? */ }; static inline void @@ -965,6 +977,8 @@ struct bpf_verifier_ops { struct bpf_insn_access_aux *info); int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, const struct bpf_prog *prog); + int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog, + s16 ctx_stack_off); int (*gen_ld_abs)(const struct bpf_insn *orig, struct bpf_insn *insn_buf); u32 (*convert_ctx_access)(enum bpf_access_type type, @@ -1795,6 +1809,7 @@ struct bpf_struct_ops_common_value { #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) bool bpf_struct_ops_get(const void *kdata); void bpf_struct_ops_put(const void *kdata); +int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff); int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value); int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, @@ -1851,6 +1866,10 @@ static inline void bpf_module_put(const void *data, struct module *owner) { module_put(owner); } +static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) +{ + return -ENOTSUPP; +} static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, void *value) @@ -2227,7 +2246,16 @@ void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); struct bpf_map *bpf_map_get(u32 ufd); struct bpf_map *bpf_map_get_with_uref(u32 ufd); -struct bpf_map *__bpf_map_get(struct fd f); + +static inline struct bpf_map *__bpf_map_get(struct fd f) +{ + if (fd_empty(f)) + return ERR_PTR(-EBADF); + if (unlikely(fd_file(f)->f_op != &bpf_map_fops)) + return ERR_PTR(-EINVAL); + return fd_file(f)->private_data; +} + void bpf_map_inc(struct bpf_map *map); void bpf_map_inc_with_uref(struct bpf_map *map); struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); @@ -3184,7 +3212,9 @@ extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; extern const struct bpf_func_proto bpf_get_current_comm_proto; extern const struct bpf_func_proto bpf_get_stackid_proto; extern const struct bpf_func_proto bpf_get_stack_proto; +extern const struct bpf_func_proto bpf_get_stack_sleepable_proto; extern const struct bpf_func_proto bpf_get_task_stack_proto; +extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto; extern const struct bpf_func_proto bpf_get_stackid_proto_pe; extern const struct bpf_func_proto bpf_get_stack_proto_pe; extern const struct bpf_func_proto bpf_sock_map_update_proto; @@ -3192,6 +3222,7 @@ extern const struct bpf_func_proto bpf_sock_hash_update_proto; extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; +extern 
const struct bpf_func_proto bpf_current_task_under_cgroup_proto; extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; extern const struct bpf_func_proto bpf_msg_redirect_map_proto; extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; diff --git a/include/linux/bpf_lsm.h b/include/linux/bpf_lsm.h index 1de7ece5d36d..aefcd6564251 100644 --- a/include/linux/bpf_lsm.h +++ b/include/linux/bpf_lsm.h @@ -9,6 +9,7 @@ #include <linux/sched.h> #include <linux/bpf.h> +#include <linux/bpf_verifier.h> #include <linux/lsm_hooks.h> #ifdef CONFIG_BPF_LSM @@ -45,6 +46,8 @@ void bpf_inode_storage_free(struct inode *inode); void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func); +int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range); #else /* !CONFIG_BPF_LSM */ static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id) @@ -78,6 +81,11 @@ static inline void bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, { } +static inline int bpf_lsm_get_retval_range(const struct bpf_prog *prog, + struct bpf_retval_range *range) +{ + return -EOPNOTSUPP; +} #endif /* CONFIG_BPF_LSM */ #endif /* _LINUX_BPF_LSM_H */ diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 7b776dae36e5..4513372c5bc8 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h @@ -23,6 +23,8 @@ * (in the "-8,-16,...,-512" form) */ #define TMP_STR_BUF_LEN 320 +/* Patch buffer size */ +#define INSN_BUF_SIZE 32 /* Liveness marks, used for registers and spilled-regs (in stack slots). * Read marks propagate upwards until they find a write mark; they record that @@ -371,6 +373,10 @@ struct bpf_jmp_history_entry { u32 prev_idx : 22; /* special flags, e.g., whether insn is doing register stack spill/load */ u32 flags : 10; + /* additional registers that need precision tracking when this + * jump is backtracked, vector of six 10-bit records + */ + u64 linked_regs; }; /* Maximum number of register states that can exist at once */ @@ -572,6 +578,14 @@ struct bpf_insn_aux_data { bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */ u8 alu_state; /* used in combination with alu_limit */ + /* true if STX or LDX instruction is a part of a spill/fill + * pattern for a bpf_fastcall call. + */ + u8 fastcall_pattern:1; + /* for CALL instructions, a number of spill/fill pairs in the + * bpf_fastcall pattern. + */ + u8 fastcall_spills_num:3; /* below fields are initialized once */ unsigned int orig_idx; /* original instruction index */ @@ -641,6 +655,10 @@ struct bpf_subprog_info { u32 linfo_idx; /* The idx to the main_prog->aux->linfo */ u16 stack_depth; /* max. stack depth used by this function */ u16 stack_extra; + /* offsets in range [stack_depth .. fastcall_stack_off) + * are used for bpf_fastcall spills and fills. 
+ */ + s16 fastcall_stack_off; bool has_tail_call: 1; bool tail_call_reachable: 1; bool has_ld_abs: 1; @@ -648,6 +666,8 @@ struct bpf_subprog_info { bool is_async_cb: 1; bool is_exception_cb: 1; bool args_cached: 1; + /* true if bpf_fastcall stack region is used by functions that can't be inlined */ + bool keep_fastcall_stack: 1; u8 arg_cnt; struct bpf_subprog_arg_info args[MAX_BPF_FUNC_REG_ARGS]; @@ -762,6 +782,8 @@ struct bpf_verifier_env { * e.g., in reg_type_str() to generate reg_type string */ char tmp_str_buf[TMP_STR_BUF_LEN]; + struct bpf_insn insn_buf[INSN_BUF_SIZE]; + struct bpf_insn epilogue_buf[INSN_BUF_SIZE]; }; static inline struct bpf_func_info_aux *subprog_aux(struct bpf_verifier_env *env, int subprog) @@ -905,6 +927,11 @@ static inline bool type_is_sk_pointer(enum bpf_reg_type type) type == PTR_TO_XDP_SOCK; } +static inline bool type_may_be_null(u32 type) +{ + return type & PTR_MAYBE_NULL; +} + static inline void mark_reg_scratched(struct bpf_verifier_env *env, u32 regno) { env->scratched_regs |= 1U << regno; diff --git a/include/linux/btf.h b/include/linux/btf.h index cffb43133c68..b8a583194c4a 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h @@ -580,6 +580,7 @@ bool btf_is_prog_ctx_type(struct bpf_verifier_log *log, const struct btf *btf, int get_kern_ctx_btf_id(struct bpf_verifier_log *log, enum bpf_prog_type prog_type); bool btf_types_are_same(const struct btf *btf1, u32 id1, const struct btf *btf2, u32 id2); +int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx); #else static inline const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id) @@ -654,6 +655,10 @@ static inline bool btf_types_are_same(const struct btf *btf1, u32 id1, { return false; } +static inline int btf_check_iter_arg(struct btf *btf, const struct btf_type *func, int arg_idx) +{ + return -EOPNOTSUPP; +} #endif static inline bool btf_type_is_struct_ptr(struct btf *btf, const struct btf_type *t) diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 14acf1bbe0ce..932139c5d46f 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h @@ -199,8 +199,7 @@ void folio_set_bh(struct buffer_head *bh, struct folio *folio, unsigned long offset); struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size, gfp_t gfp); -struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size, - bool retry); +struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size); struct buffer_head *create_empty_buffers(struct folio *folio, unsigned long blocksize, unsigned long b_state); void end_buffer_read_sync(struct buffer_head *bh, int uptodate); @@ -258,18 +257,18 @@ int __block_write_full_folio(struct inode *inode, struct folio *folio, int block_read_full_folio(struct folio *, get_block_t *); bool block_is_partially_uptodate(struct folio *, size_t from, size_t count); int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len, - struct page **pagep, get_block_t *get_block); -int __block_write_begin(struct page *page, loff_t pos, unsigned len, + struct folio **foliop, get_block_t *get_block); +int __block_write_begin(struct folio *folio, loff_t pos, unsigned len, get_block_t *get_block); int block_write_end(struct file *, struct address_space *, - loff_t, unsigned, unsigned, - struct page *, void *); + loff_t, unsigned len, unsigned copied, + struct folio *, void *); int generic_write_end(struct file *, struct address_space *, - loff_t, unsigned, unsigned, - struct 
page *, void *); + loff_t, unsigned len, unsigned copied, + struct folio *, void *); void folio_zero_new_buffers(struct folio *folio, size_t from, size_t to); int cont_write_begin(struct file *, struct address_space *, loff_t, - unsigned, struct page **, void **, + unsigned, struct folio **, void **, get_block_t *, loff_t *); int generic_cont_expand_simple(struct inode *inode, loff_t size); void block_commit_write(struct page *page, unsigned int from, unsigned int to); diff --git a/include/linux/buildid.h b/include/linux/buildid.h index 20aa3c2d89f7..014a88c41073 100644 --- a/include/linux/buildid.h +++ b/include/linux/buildid.h @@ -7,8 +7,8 @@ #define BUILD_ID_SIZE_MAX 20 struct vm_area_struct; -int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, - __u32 *size); +int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); +int build_id_parse_nofault(struct vm_area_struct *vma, unsigned char *build_id, __u32 *size); int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size); #if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID) || IS_ENABLED(CONFIG_VMCORE_INFO) diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index ae04035b6cbe..47ae4c4d924c 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -172,7 +172,11 @@ struct cgroup_subsys_state { /* reference count - access via css_[try]get() and css_put() */ struct percpu_ref refcnt; - /* siblings list anchored at the parent's ->children */ + /* + * siblings list anchored at the parent's ->children + * + * linkage is protected by cgroup_mutex or RCU + */ struct list_head sibling; struct list_head children; @@ -210,6 +214,14 @@ struct cgroup_subsys_state { * fields of the containing structure. */ struct cgroup_subsys_state *parent; + + /* + * Keep track of total numbers of visible descendant CSSes. + * The total number of dying CSSes is tracked in + * css->cgroup->nr_dying_subsys[ssid]. + * Protected by cgroup_mutex. + */ + int nr_descendants; }; /* @@ -470,6 +482,12 @@ struct cgroup { /* Private pointers for each registered subsystem */ struct cgroup_subsys_state __rcu *subsys[CGROUP_SUBSYS_COUNT]; + /* + * Keep track of total number of dying CSSes at and below this cgroup. + * Protected by cgroup_mutex. + */ + int nr_dying_subsys[CGROUP_SUBSYS_COUNT]; + struct cgroup_root *root; /* @@ -775,6 +793,11 @@ struct cgroup_subsys { extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; +struct cgroup_of_peak { + unsigned long value; + struct list_head list; +}; + /** * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups * @tsk: target task diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index c60ba0ab1462..f8ef47f8a634 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h @@ -11,6 +11,7 @@ #include <linux/sched.h> #include <linux/nodemask.h> +#include <linux/list.h> #include <linux/rculist.h> #include <linux/cgroupstats.h> #include <linux/fs.h> @@ -28,8 +29,6 @@ struct kernel_clone_args; -#ifdef CONFIG_CGROUPS - /* * All weight knobs on the default hierarchy should use the following min, * default and max values. 
The default value is the logarithmic center of @@ -39,6 +38,8 @@ struct kernel_clone_args; #define CGROUP_WEIGHT_DFL 100 #define CGROUP_WEIGHT_MAX 10000 +#ifdef CONFIG_CGROUPS + enum { CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */ @@ -854,4 +855,6 @@ static inline void cgroup_bpf_put(struct cgroup *cgrp) {} struct cgroup *task_get_cgroup1(struct task_struct *tsk, int hierarchy_id); +struct cgroup_of_peak *of_peak(struct kernfs_open_file *of); + #endif /* _LINUX_CGROUP_H */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index d9e613803df1..a3d3e888cf1f 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -98,7 +98,7 @@ const volatile void * __must_check_fn(const volatile void *val) * DEFINE_CLASS(fdget, struct fd, fdput(_T), fdget(fd), int fd) * * CLASS(fdget, f)(fd); - * if (!f.file) + * if (!fd_file(f)) * return -EBADF; * * // use 'f' without concern diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 4a537260f655..7e43caabb54b 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -394,6 +394,20 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name, __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \ NULL, (flags), (fixed_rate), 0, 0, true) /** + * devm_clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with + * the clock framework + * @dev: device that is registering this clock + * @name: name of this clock + * @parent_data: parent clk data + * @flags: framework-specific flags + * @fixed_rate: non-adjustable clock rate + */ +#define devm_clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \ + fixed_rate) \ + __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \ + (parent_data), (flags), (fixed_rate), 0, \ + 0, true) +/** * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with * the clock framework * @dev: device that is registering this clock diff --git a/include/linux/clk.h b/include/linux/clk.h index 0fa56d672532..851a0f2cf42c 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h @@ -641,6 +641,32 @@ struct clk *devm_clk_get_optional_prepared(struct device *dev, const char *id); struct clk *devm_clk_get_optional_enabled(struct device *dev, const char *id); /** + * devm_clk_get_optional_enabled_with_rate - devm_clk_get_optional() + + * clk_set_rate() + + * clk_prepare_enable() + * @dev: device for clock "consumer" + * @id: clock consumer ID + * @rate: new clock rate + * + * Context: May sleep. + * + * Return: a struct clk corresponding to the clock producer, or + * valid IS_ERR() condition containing errno. The implementation + * uses @dev and @id to determine the clock consumer, and thereby + * the clock producer. If no such clk is found, it returns NULL + * which serves as a dummy clk. That's the only difference compared + * to devm_clk_get_enabled(). + * + * The returned clk (if valid) is prepared and enabled and rate was set. + * + * The clock will automatically be disabled, unprepared and freed + * when the device is unbound from the bus. + */ +struct clk *devm_clk_get_optional_enabled_with_rate(struct device *dev, + const char *id, + unsigned long rate); + +/** * devm_get_clk_from_child - lookup and obtain a managed reference to a * clock producer from child node. 
* @dev: device for clock "consumer" @@ -982,6 +1008,13 @@ static inline struct clk *devm_clk_get_optional_enabled(struct device *dev, return NULL; } +static inline struct clk * +devm_clk_get_optional_enabled_with_rate(struct device *dev, const char *id, + unsigned long rate) +{ + return NULL; +} + static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, struct clk_bulk_data *clks) { diff --git a/include/linux/cma.h b/include/linux/cma.h index 9db877506ea8..d15b64f51336 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -52,4 +52,20 @@ extern bool cma_release(struct cma *cma, const struct page *pages, unsigned long extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data); extern void cma_reserve_pages_on_error(struct cma *cma); + +#ifdef CONFIG_CMA +struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp); +bool cma_free_folio(struct cma *cma, const struct folio *folio); +#else +static inline struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp) +{ + return NULL; +} + +static inline bool cma_free_folio(struct cma *cma, const struct folio *folio) +{ + return false; +} +#endif + #endif diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 2df665fa2964..ec55bcce4146 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -242,6 +242,9 @@ static inline void *offset_to_ptr(const int *off) /* &a[0] degrades to a pointer: a different type from an array */ #define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0])) +/* Require C Strings (i.e. NUL-terminated) lack the "nonstring" attribute. */ +#define __must_be_cstr(p) BUILD_BUG_ON_ZERO(__annotated(p, nonstring)) + /* * This returns a constant expression while determining if an argument is * a constant expression, most importantly without evaluating the argument. diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h index f14c275950b5..1a957ea2f4fe 100644 --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h @@ -421,6 +421,13 @@ struct ftrace_likely_data { #define __member_size(p) __builtin_object_size(p, 1) #endif +/* Determine if an attribute has been applied to a variable. 
*/ +#if __has_builtin(__builtin_has_attribute) +#define __annotated(var, attr) __builtin_has_attribute(var, attr) +#else +#define __annotated(var, attr) (false) +#endif + /* * Some versions of gcc do not mark 'asm goto' volatile: * diff --git a/include/linux/console.h b/include/linux/console.h index 31a8f5b85f5d..eba367bf605d 100644 --- a/include/linux/console.h +++ b/include/linux/console.h @@ -16,7 +16,9 @@ #include <linux/atomic.h> #include <linux/bits.h> +#include <linux/irq_work.h> #include <linux/rculist.h> +#include <linux/rcuwait.h> #include <linux/types.h> #include <linux/vesa.h> @@ -303,7 +305,7 @@ struct nbcon_write_context { /** * struct console - The console descriptor structure * @name: The name of the console driver - * @write: Write callback to output messages (Optional) + * @write: Legacy write callback to output messages (Optional) * @read: Read callback for console input (Optional) * @device: The underlying TTY device driver (Optional) * @unblank: Callback to unblank the console (Optional) @@ -320,10 +322,14 @@ struct nbcon_write_context { * @data: Driver private data * @node: hlist node for the console list * - * @write_atomic: Write callback for atomic context * @nbcon_state: State for nbcon consoles * @nbcon_seq: Sequence number of the next record for nbcon to print + * @nbcon_device_ctxt: Context available for non-printing operations + * @nbcon_prev_seq: Seq num the previous nbcon owner was assigned to print * @pbufs: Pointer to nbcon private buffer + * @kthread: Printer kthread for this console + * @rcuwait: RCU-safe wait object for @kthread waking + * @irq_work: Defer @kthread waking to IRQ work context */ struct console { char name[16]; @@ -345,11 +351,121 @@ struct console { struct hlist_node node; /* nbcon console specific members */ - bool (*write_atomic)(struct console *con, - struct nbcon_write_context *wctxt); + + /** + * @write_atomic: + * + * NBCON callback to write out text in any context. (Optional) + * + * This callback is called with the console already acquired. However, + * a higher priority context is allowed to take it over by default. + * + * The callback must call nbcon_enter_unsafe() and nbcon_exit_unsafe() + * around any code where the takeover is not safe, for example, when + * manipulating the serial port registers. + * + * nbcon_enter_unsafe() will fail if the context has lost the console + * ownership in the meantime. In this case, the callback is no longer + * allowed to go forward. It must back out immediately and carefully. + * The buffer content is also no longer trusted since it no longer + * belongs to the context. + * + * The callback should allow the takeover whenever it is safe. It + * increases the chance to see messages when the system is in trouble. + * If the driver must reacquire ownership in order to finalize or + * revert hardware changes, nbcon_reacquire_nobuf() can be used. + * However, on reacquire the buffer content is no longer available. A + * reacquire cannot be used to resume printing. + * + * The callback can be called from any context (including NMI). + * Therefore it must avoid usage of any locking and instead rely + * on the console ownership for synchronization. + */ + void (*write_atomic)(struct console *con, struct nbcon_write_context *wctxt); + + /** + * @write_thread: + * + * NBCON callback to write out text in task context. + * + * This callback must be called only in task context with both + * device_lock() and the nbcon console acquired with + * NBCON_PRIO_NORMAL. 
+ * + * The same rules for console ownership verification and unsafe + * sections handling applies as with write_atomic(). + * + * The console ownership handling is necessary for synchronization + * against write_atomic() which is synchronized only via the context. + * + * The device_lock() provides the primary serialization for operations + * on the device. It might be as relaxed (mutex)[*] or as tight + * (disabled preemption and interrupts) as needed. It allows + * the kthread to operate in the least restrictive mode[**]. + * + * [*] Standalone nbcon_context_try_acquire() is not safe with + * the preemption enabled, see nbcon_owner_matches(). But it + * can be safe when always called in the preemptive context + * under the device_lock(). + * + * [**] The device_lock() makes sure that nbcon_context_try_acquire() + * would never need to spin which is important especially with + * PREEMPT_RT. + */ + void (*write_thread)(struct console *con, struct nbcon_write_context *wctxt); + + /** + * @device_lock: + * + * NBCON callback to begin synchronization with driver code. + * + * Console drivers typically must deal with access to the hardware + * via user input/output (such as an interactive login shell) and + * output of kernel messages via printk() calls. This callback is + * called by the printk-subsystem whenever it needs to synchronize + * with hardware access by the driver. It should be implemented to + * use whatever synchronization mechanism the driver is using for + * itself (for example, the port lock for uart serial consoles). + * + * The callback is always called from task context. It may use any + * synchronization method required by the driver. + * + * IMPORTANT: The callback MUST disable migration. The console driver + * may be using a synchronization mechanism that already takes + * care of this (such as spinlocks). Otherwise this function must + * explicitly call migrate_disable(). + * + * The flags argument is provided as a convenience to the driver. It + * will be passed again to device_unlock(). It can be ignored if the + * driver does not need it. + */ + void (*device_lock)(struct console *con, unsigned long *flags); + + /** + * @device_unlock: + * + * NBCON callback to finish synchronization with driver code. + * + * It is the counterpart to device_lock(). + * + * This callback is always called from task context. It must + * appropriately re-enable migration (depending on how device_lock() + * disabled migration). + * + * The flags argument is the value of the same variable that was + * passed to device_lock(). + */ + void (*device_unlock)(struct console *con, unsigned long flags); + atomic_t __private nbcon_state; atomic_long_t __private nbcon_seq; + struct nbcon_context __private nbcon_device_ctxt; + atomic_long_t __private nbcon_prev_seq; + struct printk_buffers *pbufs; + struct task_struct *kthread; + struct rcuwait rcuwait; + struct irq_work irq_work; }; #ifdef CONFIG_LOCKDEP @@ -378,28 +494,34 @@ extern void console_list_unlock(void) __releases(console_mutex); extern struct hlist_head console_list; /** - * console_srcu_read_flags - Locklessly read the console flags + * console_srcu_read_flags - Locklessly read flags of a possibly registered + * console * @con: struct console pointer of console to read flags from * - * This function provides the necessary READ_ONCE() and data_race() - * notation for locklessly reading the console flags. 
The READ_ONCE() - * in this function matches the WRITE_ONCE() when @flags are modified - * for registered consoles with console_srcu_write_flags(). + * Locklessly reading @con->flags provides a consistent read value because + * there is at most one CPU modifying @con->flags and that CPU is using only + * read-modify-write operations to do so. + * + * Requires console_srcu_read_lock to be held, which implies that @con might + * be a registered console. The purpose of holding console_srcu_read_lock is + * to guarantee that the console state is valid (CON_SUSPENDED/CON_ENABLED) + * and that no exit/cleanup routines will run if the console is currently + * undergoing unregistration. * - * Only use this function to read console flags when locklessly - * iterating the console list via srcu. + * If the caller is holding the console_list_lock or it is _certain_ that + * @con is not and will not become registered, the caller may read + * @con->flags directly instead. * * Context: Any context. + * Return: The current value of the @con->flags field. */ static inline short console_srcu_read_flags(const struct console *con) { WARN_ON_ONCE(!console_srcu_read_lock_is_held()); /* - * Locklessly reading console->flags provides a consistent - * read value because there is at most one CPU modifying - * console->flags and that CPU is using only read-modify-write - * operations to do so. + * The READ_ONCE() matches the WRITE_ONCE() when @flags are modified + * for registered consoles with console_srcu_write_flags(). */ return data_race(READ_ONCE(con->flags)); } @@ -477,13 +599,19 @@ static inline bool console_is_registered(const struct console *con) hlist_for_each_entry(con, &console_list, node) #ifdef CONFIG_PRINTK +extern void nbcon_cpu_emergency_enter(void); +extern void nbcon_cpu_emergency_exit(void); extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt); extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt); extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt); +extern void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt); #else +static inline void nbcon_cpu_emergency_enter(void) { } +static inline void nbcon_cpu_emergency_exit(void) { } static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; } static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; } static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; } +static inline void nbcon_reacquire_nobuf(struct nbcon_write_context *wctxt) { } #endif extern int console_set_on_cmdline; diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 8a78fabeafc3..af9fe87a0922 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h @@ -26,26 +26,26 @@ extern void user_exit_callable(void); static inline void user_enter(void) { if (context_tracking_enabled()) - ct_user_enter(CONTEXT_USER); + ct_user_enter(CT_STATE_USER); } static inline void user_exit(void) { if (context_tracking_enabled()) - ct_user_exit(CONTEXT_USER); + ct_user_exit(CT_STATE_USER); } /* Called with interrupts disabled. 
*/ static __always_inline void user_enter_irqoff(void) { if (context_tracking_enabled()) - __ct_user_enter(CONTEXT_USER); + __ct_user_enter(CT_STATE_USER); } static __always_inline void user_exit_irqoff(void) { if (context_tracking_enabled()) - __ct_user_exit(CONTEXT_USER); + __ct_user_exit(CT_STATE_USER); } static inline enum ctx_state exception_enter(void) @@ -57,7 +57,7 @@ static inline enum ctx_state exception_enter(void) return 0; prev_ctx = __ct_state(); - if (prev_ctx != CONTEXT_KERNEL) + if (prev_ctx != CT_STATE_KERNEL) ct_user_exit(prev_ctx); return prev_ctx; @@ -67,7 +67,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { if (!IS_ENABLED(CONFIG_HAVE_CONTEXT_TRACKING_USER_OFFSTACK) && context_tracking_enabled()) { - if (prev_ctx != CONTEXT_KERNEL) + if (prev_ctx != CT_STATE_KERNEL) ct_user_enter(prev_ctx); } } @@ -75,7 +75,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) static __always_inline bool context_tracking_guest_enter(void) { if (context_tracking_enabled()) - __ct_user_enter(CONTEXT_GUEST); + __ct_user_enter(CT_STATE_GUEST); return context_tracking_enabled_this_cpu(); } @@ -83,7 +83,7 @@ static __always_inline bool context_tracking_guest_enter(void) static __always_inline bool context_tracking_guest_exit(void) { if (context_tracking_enabled()) - __ct_user_exit(CONTEXT_GUEST); + __ct_user_exit(CT_STATE_GUEST); return context_tracking_enabled_this_cpu(); } @@ -115,13 +115,17 @@ extern void ct_idle_enter(void); extern void ct_idle_exit(void); /* - * Is the current CPU in an extended quiescent state? + * Is RCU watching the current CPU (IOW, it is not in an extended quiescent state)? + * + * Note that this returns the actual boolean data (watching / not watching), + * whereas ct_rcu_watching() returns the RCU_WATCHING subvariable of + * context_tracking.state. * * No ordering, as we are sampling CPU-local information. */ -static __always_inline bool rcu_dynticks_curr_cpu_in_eqs(void) +static __always_inline bool rcu_is_watching_curr_cpu(void) { - return !(raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & RCU_DYNTICKS_IDX); + return raw_atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING; } /* @@ -142,9 +146,9 @@ static __always_inline bool warn_rcu_enter(void) * lots of the actual reporting also relies on RCU. */ preempt_disable_notrace(); - if (rcu_dynticks_curr_cpu_in_eqs()) { + if (!rcu_is_watching_curr_cpu()) { ret = true; - ct_state_inc(RCU_DYNTICKS_IDX); + ct_state_inc(CT_RCU_WATCHING); } return ret; @@ -153,7 +157,7 @@ static __always_inline bool warn_rcu_enter(void) static __always_inline void warn_rcu_exit(bool rcu) { if (rcu) - ct_state_inc(RCU_DYNTICKS_IDX); + ct_state_inc(CT_RCU_WATCHING); preempt_enable_notrace(); } diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index bbff5f7f8803..7b8433d5a8ef 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h @@ -7,22 +7,22 @@ #include <linux/context_tracking_irq.h> /* Offset to allow distinguishing irq vs. task-based idle entry/exit. 
*/ -#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1) +#define CT_NESTING_IRQ_NONIDLE ((LONG_MAX / 2) + 1) enum ctx_state { - CONTEXT_DISABLED = -1, /* returned by ct_state() if unknown */ - CONTEXT_KERNEL = 0, - CONTEXT_IDLE = 1, - CONTEXT_USER = 2, - CONTEXT_GUEST = 3, - CONTEXT_MAX = 4, + CT_STATE_DISABLED = -1, /* returned by ct_state() if unknown */ + CT_STATE_KERNEL = 0, + CT_STATE_IDLE = 1, + CT_STATE_USER = 2, + CT_STATE_GUEST = 3, + CT_STATE_MAX = 4, }; -/* Even value for idle, else odd. */ -#define RCU_DYNTICKS_IDX CONTEXT_MAX +/* Odd value for watching, else even. */ +#define CT_RCU_WATCHING CT_STATE_MAX -#define CT_STATE_MASK (CONTEXT_MAX - 1) -#define CT_DYNTICKS_MASK (~CT_STATE_MASK) +#define CT_STATE_MASK (CT_STATE_MAX - 1) +#define CT_RCU_WATCHING_MASK (~CT_STATE_MASK) struct context_tracking { #ifdef CONFIG_CONTEXT_TRACKING_USER @@ -39,8 +39,8 @@ struct context_tracking { atomic_t state; #endif #ifdef CONFIG_CONTEXT_TRACKING_IDLE - long dynticks_nesting; /* Track process nesting level. */ - long dynticks_nmi_nesting; /* Track irq/NMI nesting level. */ + long nesting; /* Track process nesting level. */ + long nmi_nesting; /* Track irq/NMI nesting level. */ #endif }; @@ -56,47 +56,47 @@ static __always_inline int __ct_state(void) #endif #ifdef CONFIG_CONTEXT_TRACKING_IDLE -static __always_inline int ct_dynticks(void) +static __always_inline int ct_rcu_watching(void) { - return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_DYNTICKS_MASK; + return atomic_read(this_cpu_ptr(&context_tracking.state)) & CT_RCU_WATCHING_MASK; } -static __always_inline int ct_dynticks_cpu(int cpu) +static __always_inline int ct_rcu_watching_cpu(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); - return atomic_read(&ct->state) & CT_DYNTICKS_MASK; + return atomic_read(&ct->state) & CT_RCU_WATCHING_MASK; } -static __always_inline int ct_dynticks_cpu_acquire(int cpu) +static __always_inline int ct_rcu_watching_cpu_acquire(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); - return atomic_read_acquire(&ct->state) & CT_DYNTICKS_MASK; + return atomic_read_acquire(&ct->state) & CT_RCU_WATCHING_MASK; } -static __always_inline long ct_dynticks_nesting(void) +static __always_inline long ct_nesting(void) { - return __this_cpu_read(context_tracking.dynticks_nesting); + return __this_cpu_read(context_tracking.nesting); } -static __always_inline long ct_dynticks_nesting_cpu(int cpu) +static __always_inline long ct_nesting_cpu(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); - return ct->dynticks_nesting; + return ct->nesting; } -static __always_inline long ct_dynticks_nmi_nesting(void) +static __always_inline long ct_nmi_nesting(void) { - return __this_cpu_read(context_tracking.dynticks_nmi_nesting); + return __this_cpu_read(context_tracking.nmi_nesting); } -static __always_inline long ct_dynticks_nmi_nesting_cpu(int cpu) +static __always_inline long ct_nmi_nesting_cpu(int cpu) { struct context_tracking *ct = per_cpu_ptr(&context_tracking, cpu); - return ct->dynticks_nmi_nesting; + return ct->nmi_nesting; } #endif /* #ifdef CONFIG_CONTEXT_TRACKING_IDLE */ @@ -113,7 +113,7 @@ static __always_inline bool context_tracking_enabled_cpu(int cpu) return context_tracking_enabled() && per_cpu(context_tracking.active, cpu); } -static inline bool context_tracking_enabled_this_cpu(void) +static __always_inline bool context_tracking_enabled_this_cpu(void) { return context_tracking_enabled() && __this_cpu_read(context_tracking.active); 
} @@ -123,14 +123,14 @@ static inline bool context_tracking_enabled_this_cpu(void) * * Returns the current cpu's context tracking state if context tracking * is enabled. If context tracking is disabled, returns - * CONTEXT_DISABLED. This should be used primarily for debugging. + * CT_STATE_DISABLED. This should be used primarily for debugging. */ static __always_inline int ct_state(void) { int ret; if (!context_tracking_enabled()) - return CONTEXT_DISABLED; + return CT_STATE_DISABLED; preempt_disable(); ret = __ct_state(); diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 0904ba010341..edeb8532ce0f 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -42,9 +42,35 @@ extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr); extern int dump_align(struct coredump_params *cprm, int align); int dump_user_range(struct coredump_params *cprm, unsigned long start, unsigned long len); -extern void do_coredump(const kernel_siginfo_t *siginfo); +extern int do_coredump(const kernel_siginfo_t *siginfo); + +/* + * Logging for the coredump code, ratelimited. + * The TGID and comm fields are added to the message. + */ + +#define __COREDUMP_PRINTK(Level, Format, ...) \ + do { \ + char comm[TASK_COMM_LEN]; \ + \ + get_task_comm(comm, current); \ + printk_ratelimited(Level "coredump: %d(%*pE): " Format "\n", \ + task_tgid_vnr(current), (int)strlen(comm), comm, ##__VA_ARGS__); \ + } while (0) \ + +#define coredump_report(fmt, ...) __COREDUMP_PRINTK(KERN_INFO, fmt, ##__VA_ARGS__) +#define coredump_report_failure(fmt, ...) __COREDUMP_PRINTK(KERN_WARNING, fmt, ##__VA_ARGS__) + #else -static inline void do_coredump(const kernel_siginfo_t *siginfo) {} +static inline int do_coredump(const kernel_siginfo_t *siginfo) +{ + /* Coredump support is not available, can't fail. */ + return 0; +} + +#define coredump_report(...) +#define coredump_report_failure(...) + #endif #if defined(CONFIG_COREDUMP) && defined(CONFIG_SYSCTL) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index d4d2f4d1d7cb..e0e19d9c1323 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -577,12 +577,6 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div, #define CPUFREQ_POLICY_POWERSAVE (1) #define CPUFREQ_POLICY_PERFORMANCE (2) -/* - * The polling frequency depends on the capability of the processor. Default - * polling frequency is 1000 times the transition latency of the processor. 
- */ -#define LATENCY_MULTIPLIER (1000) - struct cpufreq_governor { char name[CPUFREQ_NAME_LEN]; int (*init)(struct cpufreq_policy *policy); diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 9316c39260e0..2361ed4d2b15 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -144,7 +144,8 @@ enum cpuhp_state { CPUHP_AP_IRQ_ARMADA_XP_STARTING, CPUHP_AP_IRQ_BCM2836_STARTING, CPUHP_AP_IRQ_MIPS_GIC_STARTING, - CPUHP_AP_IRQ_LOONGARCH_STARTING, + CPUHP_AP_IRQ_EIOINTC_STARTING, + CPUHP_AP_IRQ_AVECINTC_STARTING, CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, CPUHP_AP_IRQ_RISCV_SBI_IPI_STARTING, @@ -152,7 +153,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING, CPUHP_AP_PERF_X86_STARTING, CPUHP_AP_PERF_X86_AMD_IBS_STARTING, - CPUHP_AP_PERF_X86_CSTATE_STARTING, CPUHP_AP_PERF_XTENSA_STARTING, CPUHP_AP_ARM_VFP_STARTING, CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, @@ -209,7 +209,6 @@ enum cpuhp_state { CPUHP_AP_PERF_X86_AMD_UNCORE_ONLINE, CPUHP_AP_PERF_X86_AMD_POWER_ONLINE, CPUHP_AP_PERF_X86_RAPL_ONLINE, - CPUHP_AP_PERF_X86_CSTATE_ONLINE, CPUHP_AP_PERF_S390_CF_ONLINE, CPUHP_AP_PERF_S390_SF_ONLINE, CPUHP_AP_PERF_ARM_CCI_ONLINE, diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index de4cf0ee96f7..835e7b793f6a 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h @@ -99,6 +99,7 @@ static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask) extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, const struct task_struct *tsk2); +#ifdef CONFIG_CPUSETS_V1 #define cpuset_memory_pressure_bump() \ do { \ if (cpuset_memory_pressure_enabled) \ @@ -106,6 +107,9 @@ extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, } while (0) extern int cpuset_memory_pressure_enabled; extern void __cpuset_memory_pressure_bump(void); +#else +static inline void cpuset_memory_pressure_bump(void) { } +#endif extern void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task); @@ -113,7 +117,6 @@ extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *tsk); extern int cpuset_mem_spread_node(void); -extern int cpuset_slab_spread_node(void); static inline int cpuset_do_page_mem_spread(void) { @@ -246,11 +249,6 @@ static inline int cpuset_mem_spread_node(void) return 0; } -static inline int cpuset_slab_spread_node(void) -{ - return 0; -} - static inline int cpuset_do_page_mem_spread(void) { return 0; diff --git a/include/linux/damon.h b/include/linux/damon.h index 27c546bfc6d4..a67f2c4940e9 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -233,7 +233,6 @@ struct damos_quota { unsigned long charge_addr_from; /* For prioritization */ - unsigned long histogram[DAMOS_MAX_SCORE + 1]; unsigned int min_score; /* For feedback loop */ @@ -630,6 +629,8 @@ struct damon_ctx { unsigned long next_ops_update_sis; /* for waiting until the execution of the kdamond_fn is started */ struct completion kdamond_started; + /* for scheme quotas prioritization */ + unsigned long *regions_score_histogram; /* public: */ struct task_struct *kdamond; diff --git a/include/linux/decompress/unxz.h b/include/linux/decompress/unxz.h index f764e2a7201e..3dd2658a9dab 100644 --- a/include/linux/decompress/unxz.h +++ b/include/linux/decompress/unxz.h @@ -1,10 +1,9 @@ +/* SPDX-License-Identifier: 0BSD */ + /* * Wrapper for decompressing XZ-compressed kernel, initramfs, and initrd * * Author: Lasse Collin <lasse.collin@tukaani.org> - * - * This 
file has been put into the public domain. - * You can do whatever you want with this file. */ #ifndef DECOMPRESS_UNXZ_H diff --git a/include/linux/device.h b/include/linux/device.h index 34eb20f5966f..b4bde8d22697 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -707,6 +707,8 @@ struct device_physical_location { * for dma allocations. This flag is managed by the dma ops * instance from ->dma_supported. * @dma_skip_sync: DMA sync operations can be skipped for coherent buffers. + * @dma_iommu: Device is using default IOMMU implementation for DMA and + * doesn't rely on dma_ops structure. * * At the lowest level, every device in a Linux system is represented by an * instance of struct device. The device structure contains the information @@ -748,7 +750,7 @@ struct device { struct dev_pin_info *pins; #endif struct dev_msi_info msi; -#ifdef CONFIG_DMA_OPS +#ifdef CONFIG_ARCH_HAS_DMA_OPS const struct dma_map_ops *dma_ops; #endif u64 *dma_mask; /* dma mask (if dma'able device) */ @@ -822,6 +824,9 @@ struct device { #ifdef CONFIG_DMA_NEED_SYNC bool dma_skip_sync:1; #endif +#ifdef CONFIG_IOMMU_DMA + bool dma_iommu:1; +#endif }; /** diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h index edbe13d00776..d7e30d4f7503 100644 --- a/include/linux/dma-direct.h +++ b/include/linux/dma-direct.h @@ -12,7 +12,7 @@ #include <linux/mem_encrypt.h> #include <linux/swiotlb.h> -extern unsigned int zone_dma_bits; +extern u64 zone_dma_limit; /* * Record the mapping of CPU physical to DMA addresses for a given region. diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 29c5650c1038..079b3dec0a16 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h @@ -79,6 +79,12 @@ to_dma_fence_array(struct dma_fence *fence) for (index = 0, fence = dma_fence_array_first(head); fence; \ ++(index), fence = dma_fence_array_next(head, index)) +struct dma_fence_array *dma_fence_array_alloc(int num_fences); +void dma_fence_array_init(struct dma_fence_array *array, + int num_fences, struct dma_fence **fences, + u64 context, unsigned seqno, + bool signal_on_any); + struct dma_fence_array *dma_fence_array_create(int num_fences, struct dma_fence **fences, u64 context, unsigned seqno, diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h index 064bad725061..27d15f60950a 100644 --- a/include/linux/dma-heap.h +++ b/include/linux/dma-heap.h @@ -9,14 +9,13 @@ #ifndef _DMA_HEAPS_H #define _DMA_HEAPS_H -#include <linux/cdev.h> #include <linux/types.h> struct dma_heap; /** * struct dma_heap_ops - ops to operate on a given heap - * @allocate: allocate dmabuf and return struct dma_buf ptr + * @allocate: allocate dmabuf and return struct dma_buf ptr * * allocate returns dmabuf on success, ERR_PTR(-errno) on error. */ @@ -41,28 +40,10 @@ struct dma_heap_export_info { void *priv; }; -/** - * dma_heap_get_drvdata() - get per-heap driver data - * @heap: DMA-Heap to retrieve private data for - * - * Returns: - * The per-heap data for the heap. - */ void *dma_heap_get_drvdata(struct dma_heap *heap); -/** - * dma_heap_get_name() - get heap name - * @heap: DMA-Heap to retrieve private data for - * - * Returns: - * The char* for the heap name. 
- */ const char *dma_heap_get_name(struct dma_heap *heap); -/** - * dma_heap_add - adds a heap to dmabuf heaps - * @exp_info: information needed to register this heap - */ struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info); #endif /* _DMA_HEAPS_H */ diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index 02a1c825896b..b7773201414c 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -13,20 +13,7 @@ struct cma; struct iommu_ops; -/* - * Values for struct dma_map_ops.flags: - * - * DMA_F_PCI_P2PDMA_SUPPORTED: Indicates the dma_map_ops implementation can - * handle PCI P2PDMA pages in the map_sg/unmap_sg operation. - * DMA_F_CAN_SKIP_SYNC: DMA sync operations can be skipped if the device is - * coherent and it's not an SWIOTLB buffer. - */ -#define DMA_F_PCI_P2PDMA_SUPPORTED (1 << 0) -#define DMA_F_CAN_SKIP_SYNC (1 << 1) - struct dma_map_ops { - unsigned int flags; - void *(*alloc)(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs); @@ -37,11 +24,6 @@ struct dma_map_ops { gfp_t gfp); void (*free_pages)(struct device *dev, size_t size, struct page *vaddr, dma_addr_t dma_handle, enum dma_data_direction dir); - struct sg_table *(*alloc_noncontiguous)(struct device *dev, size_t size, - enum dma_data_direction dir, gfp_t gfp, - unsigned long attrs); - void (*free_noncontiguous)(struct device *dev, size_t size, - struct sg_table *sgt, enum dma_data_direction dir); int (*mmap)(struct device *, struct vm_area_struct *, void *, dma_addr_t, size_t, unsigned long attrs); @@ -88,7 +70,7 @@ struct dma_map_ops { unsigned long (*get_merge_boundary)(struct device *dev); }; -#ifdef CONFIG_DMA_OPS +#ifdef CONFIG_ARCH_HAS_DMA_OPS #include <asm/dma-mapping.h> static inline const struct dma_map_ops *get_dma_ops(struct device *dev) @@ -103,7 +85,7 @@ static inline void set_dma_ops(struct device *dev, { dev->dma_ops = dma_ops; } -#else /* CONFIG_DMA_OPS */ +#else /* CONFIG_ARCH_HAS_DMA_OPS */ static inline const struct dma_map_ops *get_dma_ops(struct device *dev) { return NULL; @@ -112,7 +94,7 @@ static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *dma_ops) { } -#endif /* CONFIG_DMA_OPS */ +#endif /* CONFIG_ARCH_HAS_DMA_OPS */ #ifdef CONFIG_DMA_CMA extern struct cma *dma_contiguous_default_area; @@ -219,20 +201,6 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, } #endif /* CONFIG_DMA_GLOBAL_POOL */ -/* - * This is the actual return value from the ->alloc_noncontiguous method. - * The users of the DMA API should only care about the sg_table, but to make - * the DMA-API internal vmaping and freeing easier we stash away the page - * array as well (except for the fallback case). This can go away any time, - * e.g. when a vmap-variant that takes a scatterlist comes along. 
- */ -struct dma_sgt_handle { - struct sg_table sgt; - struct page **pages; -}; -#define sgt_handle(sgt) \ - container_of((sgt), struct dma_sgt_handle, sgt) - int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr, size_t size, unsigned long attrs); diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index f693aafe221f..1524da363734 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h @@ -524,13 +524,11 @@ static inline unsigned int dma_get_max_seg_size(struct device *dev) return SZ_64K; } -static inline int dma_set_max_seg_size(struct device *dev, unsigned int size) +static inline void dma_set_max_seg_size(struct device *dev, unsigned int size) { - if (dev->dma_parms) { - dev->dma_parms->max_segment_size = size; - return 0; - } - return -EIO; + if (WARN_ON_ONCE(!dev->dma_parms)) + return; + dev->dma_parms->max_segment_size = size; } static inline unsigned long dma_get_seg_boundary(struct device *dev) @@ -559,13 +557,11 @@ static inline unsigned long dma_get_seg_boundary_nr_pages(struct device *dev, return (dma_get_seg_boundary(dev) >> page_shift) + 1; } -static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) +static inline void dma_set_seg_boundary(struct device *dev, unsigned long mask) { - if (dev->dma_parms) { - dev->dma_parms->segment_boundary_mask = mask; - return 0; - } - return -EIO; + if (WARN_ON_ONCE(!dev->dma_parms)) + return; + dev->dma_parms->segment_boundary_mask = mask; } static inline unsigned int dma_get_min_align_mask(struct device *dev) @@ -575,13 +571,12 @@ static inline unsigned int dma_get_min_align_mask(struct device *dev) return 0; } -static inline int dma_set_min_align_mask(struct device *dev, +static inline void dma_set_min_align_mask(struct device *dev, unsigned int min_align_mask) { if (WARN_ON_ONCE(!dev->dma_parms)) - return -EIO; + return; dev->dma_parms->min_align_mask = min_align_mask; - return 0; } #ifndef dma_get_cache_alignment diff --git a/include/linux/dma/ipu-dma.h b/include/linux/dma/ipu-dma.h deleted file mode 100644 index 6969391580d2..000000000000 --- a/include/linux/dma/ipu-dma.h +++ /dev/null @@ -1,174 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) 2008 - * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de> - * - * Copyright (C) 2005-2007 Freescale Semiconductor, Inc. - */ - -#ifndef __LINUX_DMA_IPU_DMA_H -#define __LINUX_DMA_IPU_DMA_H - -#include <linux/types.h> -#include <linux/dmaengine.h> - -/* IPU DMA Controller channel definitions. */ -enum ipu_channel { - IDMAC_IC_0 = 0, /* IC (encoding task) to memory */ - IDMAC_IC_1 = 1, /* IC (viewfinder task) to memory */ - IDMAC_ADC_0 = 1, - IDMAC_IC_2 = 2, - IDMAC_ADC_1 = 2, - IDMAC_IC_3 = 3, - IDMAC_IC_4 = 4, - IDMAC_IC_5 = 5, - IDMAC_IC_6 = 6, - IDMAC_IC_7 = 7, /* IC (sensor data) to memory */ - IDMAC_IC_8 = 8, - IDMAC_IC_9 = 9, - IDMAC_IC_10 = 10, - IDMAC_IC_11 = 11, - IDMAC_IC_12 = 12, - IDMAC_IC_13 = 13, - IDMAC_SDC_0 = 14, /* Background synchronous display data */ - IDMAC_SDC_1 = 15, /* Foreground data (overlay) */ - IDMAC_SDC_2 = 16, - IDMAC_SDC_3 = 17, - IDMAC_ADC_2 = 18, - IDMAC_ADC_3 = 19, - IDMAC_ADC_4 = 20, - IDMAC_ADC_5 = 21, - IDMAC_ADC_6 = 22, - IDMAC_ADC_7 = 23, - IDMAC_PF_0 = 24, - IDMAC_PF_1 = 25, - IDMAC_PF_2 = 26, - IDMAC_PF_3 = 27, - IDMAC_PF_4 = 28, - IDMAC_PF_5 = 29, - IDMAC_PF_6 = 30, - IDMAC_PF_7 = 31, -}; - -/* Order significant! 
*/ -enum ipu_channel_status { - IPU_CHANNEL_FREE, - IPU_CHANNEL_INITIALIZED, - IPU_CHANNEL_READY, - IPU_CHANNEL_ENABLED, -}; - -#define IPU_CHANNELS_NUM 32 - -enum pixel_fmt { - /* 1 byte */ - IPU_PIX_FMT_GENERIC, - IPU_PIX_FMT_RGB332, - IPU_PIX_FMT_YUV420P, - IPU_PIX_FMT_YUV422P, - IPU_PIX_FMT_YUV420P2, - IPU_PIX_FMT_YVU422P, - /* 2 bytes */ - IPU_PIX_FMT_RGB565, - IPU_PIX_FMT_RGB666, - IPU_PIX_FMT_BGR666, - IPU_PIX_FMT_YUYV, - IPU_PIX_FMT_UYVY, - /* 3 bytes */ - IPU_PIX_FMT_RGB24, - IPU_PIX_FMT_BGR24, - /* 4 bytes */ - IPU_PIX_FMT_GENERIC_32, - IPU_PIX_FMT_RGB32, - IPU_PIX_FMT_BGR32, - IPU_PIX_FMT_ABGR32, - IPU_PIX_FMT_BGRA32, - IPU_PIX_FMT_RGBA32, -}; - -enum ipu_color_space { - IPU_COLORSPACE_RGB, - IPU_COLORSPACE_YCBCR, - IPU_COLORSPACE_YUV -}; - -/* - * Enumeration of IPU rotation modes - */ -enum ipu_rotate_mode { - /* Note the enum values correspond to BAM value */ - IPU_ROTATE_NONE = 0, - IPU_ROTATE_VERT_FLIP = 1, - IPU_ROTATE_HORIZ_FLIP = 2, - IPU_ROTATE_180 = 3, - IPU_ROTATE_90_RIGHT = 4, - IPU_ROTATE_90_RIGHT_VFLIP = 5, - IPU_ROTATE_90_RIGHT_HFLIP = 6, - IPU_ROTATE_90_LEFT = 7, -}; - -/* - * Enumeration of DI ports for ADC. - */ -enum display_port { - DISP0, - DISP1, - DISP2, - DISP3 -}; - -struct idmac_video_param { - unsigned short in_width; - unsigned short in_height; - uint32_t in_pixel_fmt; - unsigned short out_width; - unsigned short out_height; - uint32_t out_pixel_fmt; - unsigned short out_stride; - bool graphics_combine_en; - bool global_alpha_en; - bool key_color_en; - enum display_port disp; - unsigned short out_left; - unsigned short out_top; -}; - -/* - * Union of initialization parameters for a logical channel. So far only video - * parameters are used. - */ -union ipu_channel_param { - struct idmac_video_param video; -}; - -struct idmac_tx_desc { - struct dma_async_tx_descriptor txd; - struct scatterlist *sg; /* scatterlist for this */ - unsigned int sg_len; /* tx-descriptor. 
*/ - struct list_head list; -}; - -struct idmac_channel { - struct dma_chan dma_chan; - dma_cookie_t completed; /* last completed cookie */ - union ipu_channel_param params; - enum ipu_channel link; /* input channel, linked to the output */ - enum ipu_channel_status status; - void *client; /* Only one client per channel */ - unsigned int n_tx_desc; - struct idmac_tx_desc *desc; /* allocated tx-descriptors */ - struct scatterlist *sg[2]; /* scatterlist elements in buffer-0 and -1 */ - struct list_head free_list; /* free tx-descriptors */ - struct list_head queue; /* queued tx-descriptors */ - spinlock_t lock; /* protects sg[0,1], queue */ - struct mutex chan_mutex; /* protects status, cookie, free_list */ - bool sec_chan_en; - int active_buffer; - unsigned int eof_irq; - char eof_name[16]; /* EOF IRQ name for request_irq() */ -}; - -#define to_tx_desc(tx) container_of(tx, struct idmac_tx_desc, txd) -#define to_idmac_chan(c) container_of(c, struct idmac_channel, dma_chan) - -#endif /* __LINUX_DMA_IPU_DMA_H */ diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h index 1e491c5dcac2..2dea217629d0 100644 --- a/include/linux/dma/k3-udma-glue.h +++ b/include/linux/dma/k3-udma-glue.h @@ -136,8 +136,6 @@ u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn, u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn); int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num); -void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn, - u32 flow_num); void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn, u32 flow_num, void *data, void (*cleanup)(void *data, dma_addr_t desc_dma), diff --git a/include/linux/dpll.h b/include/linux/dpll.h index d275736230b3..81f7b623d0ba 100644 --- a/include/linux/dpll.h +++ b/include/linux/dpll.h @@ -15,6 +15,7 @@ struct dpll_device; struct dpll_pin; +struct dpll_pin_esync; struct dpll_device_ops { int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv, @@ -83,6 +84,13 @@ struct dpll_pin_ops { int (*ffo_get)(const struct dpll_pin *pin, void *pin_priv, const struct dpll_device *dpll, void *dpll_priv, s64 *ffo, struct netlink_ext_ack *extack); + int (*esync_set)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + u64 freq, struct netlink_ext_ack *extack); + int (*esync_get)(const struct dpll_pin *pin, void *pin_priv, + const struct dpll_device *dpll, void *dpll_priv, + struct dpll_pin_esync *esync, + struct netlink_ext_ack *extack); }; struct dpll_pin_frequency { @@ -111,6 +119,13 @@ struct dpll_pin_phase_adjust_range { s32 max; }; +struct dpll_pin_esync { + u64 freq; + const struct dpll_pin_frequency *range; + u8 range_num; + u8 pulse; +}; + struct dpll_pin_properties { const char *board_label; const char *panel_label; diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h index b0fb775a600d..1e50cdb83ae5 100644 --- a/include/linux/entry-common.h +++ b/include/linux/entry-common.h @@ -108,7 +108,7 @@ static __always_inline void enter_from_user_mode(struct pt_regs *regs) arch_enter_from_user_mode(regs); lockdep_hardirqs_off(CALLER_ADDR0); - CT_WARN_ON(__ct_state() != CONTEXT_USER); + CT_WARN_ON(__ct_state() != CT_STATE_USER); user_exit_irqoff(); instrumentation_begin(); diff --git a/include/linux/err.h b/include/linux/err.h index b5d9bb2a2349..a4dacd745fcf 100644 --- a/include/linux/err.h +++ b/include/linux/err.h @@ -41,6 +41,9 @@ static inline void * __must_check ERR_PTR(long error) 
return (void *) error; } +/* Return the pointer in the percpu address space. */ +#define ERR_PTR_PCPU(error) ((void __percpu *)(unsigned long)ERR_PTR(error)) + /** * PTR_ERR - Extract the error code from an error pointer. * @ptr: An error pointer. @@ -51,6 +54,9 @@ static inline long __must_check PTR_ERR(__force const void *ptr) return (long) ptr; } +/* Read an error pointer from the percpu address space. */ +#define PTR_ERR_PCPU(ptr) (PTR_ERR((const void *)(__force const unsigned long)(ptr))) + /** * IS_ERR - Detect an error pointer. * @ptr: The pointer to check. @@ -61,6 +67,9 @@ static inline bool __must_check IS_ERR(__force const void *ptr) return IS_ERR_VALUE((unsigned long)ptr); } +/* Read an error pointer from the percpu address space. */ +#define IS_ERR_PCPU(ptr) (IS_ERR((const void *)(__force const unsigned long)(ptr))) + /** * IS_ERR_OR_NULL - Detect an error pointer or a null pointer. * @ptr: The pointer to check. diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 0ed47d00549b..30114c25ad12 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h @@ -645,7 +645,7 @@ static inline struct ethhdr *eth_skb_pull_mac(struct sk_buff *skb) } /** - * eth_skb_pad - Pad buffer to mininum number of octets for Ethernet frame + * eth_skb_pad - Pad buffer to minimum number of octets for Ethernet frame * @skb: Buffer to pad * * An Ethernet frame should have a minimum size of 60 bytes. This function diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 989c94eddb2b..12f6dc567598 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h @@ -727,9 +727,13 @@ struct kernel_ethtool_ts_info { * @cap_link_lanes_supported: indicates if the driver supports lanes * parameter. * @cap_rss_ctx_supported: indicates if the driver supports RSS - * contexts. + * contexts via legacy API, drivers implementing @create_rxfh_context + * do not have to set this bit. * @cap_rss_sym_xor_supported: indicates if the driver supports symmetric-xor * RSS. + * @rxfh_per_ctx_key: device supports setting different RSS key for each + * additional context. Netlink API should report hfunc, key, and input_xfrm + * for every context, not just context 0. * @rxfh_indir_space: max size of RSS indirection tables, if indirection table * size as returned by @get_rxfh_indir_size may change during lifetime * of the device. Leave as 0 if the table size is constant. 
@@ -951,6 +955,7 @@ struct ethtool_ops { u32 cap_link_lanes_supported:1; u32 cap_rss_ctx_supported:1; u32 cap_rss_sym_xor_supported:1; + u32 rxfh_per_ctx_key:1; u32 rxfh_indir_space; u16 rxfh_key_space; u16 rxfh_priv_size; diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h index fae0dfb9a9c8..aba91335273a 100644 --- a/include/linux/ethtool_netlink.h +++ b/include/linux/ethtool_netlink.h @@ -23,8 +23,10 @@ struct phy_device; int ethnl_cable_test_alloc(struct phy_device *phydev, u8 cmd); void ethnl_cable_test_free(struct phy_device *phydev); void ethnl_cable_test_finished(struct phy_device *phydev); -int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, u8 result); -int ethnl_cable_test_fault_length(struct phy_device *phydev, u8 pair, u32 cm); +int ethnl_cable_test_result_with_src(struct phy_device *phydev, u8 pair, + u8 result, u32 src); +int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, u8 pair, + u32 cm, u32 src); int ethnl_cable_test_amplitude(struct phy_device *phydev, u8 pair, s16 mV); int ethnl_cable_test_pulse(struct phy_device *phydev, u16 mV); int ethnl_cable_test_step(struct phy_device *phydev, u32 first, u32 last, @@ -54,14 +56,14 @@ static inline void ethnl_cable_test_free(struct phy_device *phydev) static inline void ethnl_cable_test_finished(struct phy_device *phydev) { } -static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, - u8 result) +static inline int ethnl_cable_test_result_with_src(struct phy_device *phydev, + u8 pair, u8 result, u32 src) { return -EOPNOTSUPP; } -static inline int ethnl_cable_test_fault_length(struct phy_device *phydev, - u8 pair, u32 cm) +static inline int ethnl_cable_test_fault_length_with_src(struct phy_device *phydev, + u8 pair, u32 cm, u32 src) { return -EOPNOTSUPP; } @@ -119,4 +121,19 @@ static inline bool ethtool_dev_mm_supported(struct net_device *dev) } #endif /* IS_ENABLED(CONFIG_ETHTOOL_NETLINK) */ + +static inline int ethnl_cable_test_result(struct phy_device *phydev, u8 pair, + u8 result) +{ + return ethnl_cable_test_result_with_src(phydev, pair, result, + ETHTOOL_A_CABLE_INF_SRC_TDR); +} + +static inline int ethnl_cable_test_fault_length(struct phy_device *phydev, + u8 pair, u32 cm) +{ + return ethnl_cable_test_fault_length_with_src(phydev, pair, cm, + ETHTOOL_A_CABLE_INF_SRC_TDR); +} + #endif /* _LINUX_ETHTOOL_NETLINK_H_ */ diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 01bee2b289c2..b0b821edfd97 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -19,7 +19,6 @@ #define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */ #define F2FS_MAX_EXTENSION 64 /* # of extension entries */ #define F2FS_EXTENSION_LEN 8 /* max size of extension */ -#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS) #define NULL_ADDR ((block_t)0) /* used as block_t addresses */ #define NEW_ADDR ((block_t)-1) /* used as block_t addresses */ @@ -28,6 +27,7 @@ #define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS) #define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS) #define F2FS_BLK_END_BYTES(blk) (F2FS_BLK_TO_BYTES(blk + 1) - 1) +#define F2FS_BLK_ALIGN(x) (F2FS_BYTES_TO_BLK((x) + F2FS_BLKSIZE - 1)) /* 0, 1(node nid), 2(meta nid) are reserved node id */ #define F2FS_RESERVED_NODE_NUM 3 @@ -278,7 +278,7 @@ struct node_footer { #define F2FS_INLINE_DATA 0x02 /* file inline data flag */ #define F2FS_INLINE_DENTRY 0x04 /* file inline dentry flag */ #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ 
-#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ +#define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries (obsolete) */ #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ #define F2FS_PIN_FILE 0x40 /* file should not be gced */ #define F2FS_COMPRESS_RELEASED 0x80 /* file released compressed blocks */ diff --git a/include/linux/falloc.h b/include/linux/falloc.h index f3f0b97b1675..3f49f3df6af5 100644 --- a/include/linux/falloc.h +++ b/include/linux/falloc.h @@ -25,12 +25,18 @@ struct space_resv { #define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv) #define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv) -#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \ - FALLOC_FL_PUNCH_HOLE | \ - FALLOC_FL_COLLAPSE_RANGE | \ - FALLOC_FL_ZERO_RANGE | \ - FALLOC_FL_INSERT_RANGE | \ - FALLOC_FL_UNSHARE_RANGE) +/* + * Mask of all supported fallocate modes. Only one can be set at a time. + * + * In addition to the mode bit, the mode argument can also encode flags. + * FALLOC_FL_KEEP_SIZE is the only supported flag so far. + */ +#define FALLOC_FL_MODE_MASK (FALLOC_FL_ALLOCATE_RANGE | \ + FALLOC_FL_PUNCH_HOLE | \ + FALLOC_FL_COLLAPSE_RANGE | \ + FALLOC_FL_ZERO_RANGE | \ + FALLOC_FL_INSERT_RANGE | \ + FALLOC_FL_UNSHARE_RANGE) /* on ia32 l_start is on a 32-bit boundary */ #if defined(CONFIG_X86_64) diff --git a/include/linux/fault-inject.h b/include/linux/fault-inject.h index 354413950d34..8c829d28dcf3 100644 --- a/include/linux/fault-inject.h +++ b/include/linux/fault-inject.h @@ -2,13 +2,17 @@ #ifndef _LINUX_FAULT_INJECT_H #define _LINUX_FAULT_INJECT_H +#include <linux/err.h> +#include <linux/types.h> + +struct dentry; +struct kmem_cache; + #ifdef CONFIG_FAULT_INJECTION -#include <linux/types.h> -#include <linux/debugfs.h> +#include <linux/atomic.h> #include <linux/configfs.h> #include <linux/ratelimit.h> -#include <linux/atomic.h> /* * For explanation of the elements of this struct, see @@ -51,6 +55,28 @@ int setup_fault_attr(struct fault_attr *attr, char *str); bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags); bool should_fail(struct fault_attr *attr, ssize_t size); +#else /* CONFIG_FAULT_INJECTION */ + +struct fault_attr { +}; + +#define DECLARE_FAULT_ATTR(name) struct fault_attr name = {} + +static inline int setup_fault_attr(struct fault_attr *attr, char *str) +{ + return 0; /* Note: 0 means error for __setup() handlers! 
*/ +} +static inline bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags) +{ + return false; +} +static inline bool should_fail(struct fault_attr *attr, ssize_t size) +{ + return false; +} + +#endif /* CONFIG_FAULT_INJECTION */ + #ifdef CONFIG_FAULT_INJECTION_DEBUG_FS struct dentry *fault_create_debugfs_attr(const char *name, @@ -87,10 +113,6 @@ static inline void fault_config_init(struct fault_config *config, #endif /* CONFIG_FAULT_INJECTION_CONFIGFS */ -#endif /* CONFIG_FAULT_INJECTION */ - -struct kmem_cache; - #ifdef CONFIG_FAIL_PAGE_ALLOC bool should_fail_alloc_page(gfp_t gfp_mask, unsigned int order); #else diff --git a/include/linux/fb.h b/include/linux/fb.h index db7d97b10964..267b59ead432 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -510,6 +510,7 @@ struct fb_info { void *par; bool skip_vt_switch; /* no VT switch on suspend/resume required */ + bool skip_panic; /* Do not write to the fb after a panic */ }; /* This will go away @@ -601,6 +602,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf, /* fbmem.c */ extern int register_framebuffer(struct fb_info *fb_info); extern void unregister_framebuffer(struct fb_info *fb_info); +extern int devm_register_framebuffer(struct device *dev, struct fb_info *fb_info); extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size); extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx, u32 height, u32 shift_high, u32 shift_low, u32 mod); diff --git a/include/linux/file.h b/include/linux/file.h index 59b146a14dca..f98de143245a 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -11,6 +11,7 @@ #include <linux/posix_types.h> #include <linux/errno.h> #include <linux/cleanup.h> +#include <linux/err.h> struct file; @@ -35,51 +36,52 @@ static inline void fput_light(struct file *file, int fput_needed) fput(file); } +/* either a reference to struct file + flags + * (cloned vs. borrowed, pos locked), with + * flags stored in lower bits of value, + * or empty (represented by 0). 
+ */ struct fd { - struct file *file; - unsigned int flags; + unsigned long word; }; #define FDPUT_FPUT 1 #define FDPUT_POS_UNLOCK 2 -static inline void fdput(struct fd fd) +#define fd_file(f) ((struct file *)((f).word & ~(FDPUT_FPUT|FDPUT_POS_UNLOCK))) +static inline bool fd_empty(struct fd f) { - if (fd.flags & FDPUT_FPUT) - fput(fd.file); + return unlikely(!f.word); } -extern struct file *fget(unsigned int fd); -extern struct file *fget_raw(unsigned int fd); -extern struct file *fget_task(struct task_struct *task, unsigned int fd); -extern unsigned long __fdget(unsigned int fd); -extern unsigned long __fdget_raw(unsigned int fd); -extern unsigned long __fdget_pos(unsigned int fd); -extern void __f_unlock_pos(struct file *); - -static inline struct fd __to_fd(unsigned long v) +#define EMPTY_FD (struct fd){0} +static inline struct fd BORROWED_FD(struct file *f) { - return (struct fd){(struct file *)(v & ~3),v & 3}; + return (struct fd){(unsigned long)f}; } - -static inline struct fd fdget(unsigned int fd) +static inline struct fd CLONED_FD(struct file *f) { - return __to_fd(__fdget(fd)); + return (struct fd){(unsigned long)f | FDPUT_FPUT}; } -static inline struct fd fdget_raw(unsigned int fd) +static inline void fdput(struct fd fd) { - return __to_fd(__fdget_raw(fd)); + if (fd.word & FDPUT_FPUT) + fput(fd_file(fd)); } -static inline struct fd fdget_pos(int fd) -{ - return __to_fd(__fdget_pos(fd)); -} +extern struct file *fget(unsigned int fd); +extern struct file *fget_raw(unsigned int fd); +extern struct file *fget_task(struct task_struct *task, unsigned int fd); +extern void __f_unlock_pos(struct file *); + +struct fd fdget(unsigned int fd); +struct fd fdget_raw(unsigned int fd); +struct fd fdget_pos(unsigned int fd); static inline void fdput_pos(struct fd f) { - if (f.flags & FDPUT_POS_UNLOCK) - __f_unlock_pos(f.file); + if (f.word & FDPUT_POS_UNLOCK) + __f_unlock_pos(fd_file(f)); fdput(f); } @@ -96,6 +98,7 @@ extern void put_unused_fd(unsigned int fd); DEFINE_CLASS(get_unused_fd, int, if (_T >= 0) put_unused_fd(_T), get_unused_fd_flags(flags), unsigned flags) +DEFINE_FREE(fput, struct file *, if (!IS_ERR_OR_NULL(_T)) fput(_T)) /* * take_fd() will take care to set @fd to -EBADF ensuring that diff --git a/include/linux/filelock.h b/include/linux/filelock.h index daee999d05f3..bb44224c6676 100644 --- a/include/linux/filelock.h +++ b/include/linux/filelock.h @@ -420,28 +420,38 @@ static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl) #ifdef CONFIG_FILE_LOCKING static inline int break_lease(struct inode *inode, unsigned int mode) { + struct file_lock_context *flctx; + /* * Since this check is lockless, we must ensure that any refcounts * taken are done before checking i_flctx->flc_lease. Otherwise, we * could end up racing with tasks trying to set a new lease on this * file. */ + flctx = READ_ONCE(inode->i_flctx); + if (!flctx) + return 0; smp_mb(); - if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + if (!list_empty_careful(&flctx->flc_lease)) return __break_lease(inode, mode, FL_LEASE); return 0; } static inline int break_deleg(struct inode *inode, unsigned int mode) { + struct file_lock_context *flctx; + /* * Since this check is lockless, we must ensure that any refcounts * taken are done before checking i_flctx->flc_lease. Otherwise, we * could end up racing with tasks trying to set a new lease on this * file. 
*/ + flctx = READ_ONCE(inode->i_flctx); + if (!flctx) + return 0; smp_mb(); - if (inode->i_flctx && !list_empty_careful(&inode->i_flctx->flc_lease)) + if (!list_empty_careful(&flctx->flc_lease)) return __break_lease(inode, mode, FL_DELEG); return 0; } diff --git a/include/linux/filter.h b/include/linux/filter.h index b6672ff61407..7d7578a8eac1 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h @@ -437,6 +437,16 @@ static inline bool insn_is_cast_user(const struct bpf_insn *insn) .off = OFF, \ .imm = 0 }) +/* Unconditional jumps, gotol pc + imm32 */ + +#define BPF_JMP32_A(IMM) \ + ((struct bpf_insn) { \ + .code = BPF_JMP32 | BPF_JA, \ + .dst_reg = 0, \ + .src_reg = 0, \ + .off = 0, \ + .imm = IMM }) + /* Relative call */ #define BPF_CALL_REL(TGT) \ @@ -1616,7 +1626,7 @@ extern struct static_key_false bpf_sk_lookup_enabled; _all_pass || _selected_sk ? SK_PASS : SK_DROP; \ }) -static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, +static inline bool bpf_sk_lookup_run_v4(const struct net *net, int protocol, const __be32 saddr, const __be16 sport, const __be32 daddr, const u16 dport, const int ifindex, struct sock **psk) @@ -1653,7 +1663,7 @@ static inline bool bpf_sk_lookup_run_v4(struct net *net, int protocol, } #if IS_ENABLED(CONFIG_IPV6) -static inline bool bpf_sk_lookup_run_v6(struct net *net, int protocol, +static inline bool bpf_sk_lookup_run_v6(const struct net *net, int protocol, const struct in6_addr *saddr, const __be16 sport, const struct in6_addr *daddr, diff --git a/include/linux/firewire.h b/include/linux/firewire.h index 1cca14cf5652..b632eec3ab52 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -134,6 +134,8 @@ struct fw_card { __be32 topology_map[(CSR_TOPOLOGY_MAP_END - CSR_TOPOLOGY_MAP) / 4]; __be32 maint_utility_register; + + struct workqueue_struct *isoc_wq; }; static inline struct fw_card *fw_card_get(struct fw_card *card) @@ -509,6 +511,7 @@ union fw_iso_callback { struct fw_iso_context { struct fw_card *card; + struct work_struct work; int type; int channel; int speed; @@ -528,6 +531,25 @@ int fw_iso_context_queue(struct fw_iso_context *ctx, unsigned long payload); void fw_iso_context_queue_flush(struct fw_iso_context *ctx); int fw_iso_context_flush_completions(struct fw_iso_context *ctx); + +/** + * fw_iso_context_schedule_flush_completions() - schedule work item to process isochronous context. + * @ctx: the isochronous context + * + * Schedule a work item on workqueue to process the isochronous context. The registered callback + * function is called by the worker when a queued packet buffer with the interrupt flag is + * completed, either after transmission in the IT context or after being filled in the IR context. + * The callback function is also called when the header buffer in the context becomes full, If it + * is required to process the context in the current context, fw_iso_context_flush_completions() is + * available instead. + * + * Context: Any context. 
+ */ +static inline void fw_iso_context_schedule_flush_completions(struct fw_iso_context *ctx) +{ + queue_work(ctx->card->isoc_wq, &ctx->work); +} + int fw_iso_context_start(struct fw_iso_context *ctx, int cycle, int sync, int tags); int fw_iso_context_stop(struct fw_iso_context *ctx); diff --git a/include/linux/firmware/imx/sm.h b/include/linux/firmware/imx/sm.h new file mode 100644 index 000000000000..9b85a3f028d1 --- /dev/null +++ b/include/linux/firmware/imx/sm.h @@ -0,0 +1,23 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * Copyright 2024 NXP + */ + +#ifndef _SCMI_IMX_H +#define _SCMI_IMX_H + +#include <linux/bitfield.h> +#include <linux/errno.h> +#include <linux/types.h> + +#define SCMI_IMX_CTRL_PDM_CLK_SEL 0 /* AON PDM clock sel */ +#define SCMI_IMX_CTRL_MQS1_SETTINGS 1 /* AON MQS settings */ +#define SCMI_IMX_CTRL_SAI1_MCLK 2 /* AON SAI1 MCLK */ +#define SCMI_IMX_CTRL_SAI3_MCLK 3 /* WAKE SAI3 MCLK */ +#define SCMI_IMX_CTRL_SAI4_MCLK 4 /* WAKE SAI4 MCLK */ +#define SCMI_IMX_CTRL_SAI5_MCLK 5 /* WAKE SAI5 MCLK */ + +int scmi_imx_misc_ctrl_get(u32 id, u32 *num, u32 *val); +int scmi_imx_misc_ctrl_set(u32 id, u32 val); + +#endif diff --git a/include/linux/folio_queue.h b/include/linux/folio_queue.h new file mode 100644 index 000000000000..955680c3bb5f --- /dev/null +++ b/include/linux/folio_queue.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Queue of folios definitions + * + * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + */ + +#ifndef _LINUX_FOLIO_QUEUE_H +#define _LINUX_FOLIO_QUEUE_H + +#include <linux/pagevec.h> + +/* + * Segment in a queue of running buffers. Each segment can hold a number of + * folios and a portion of the queue can be referenced with the ITER_FOLIOQ + * iterator. The possibility exists of inserting non-folio elements into the + * queue (such as gaps). + * + * Explicit prev and next pointers are used instead of a list_head to make it + * easier to add segments to tail and remove them from the head without the + * need for a lock. 
+ */ +struct folio_queue { + struct folio_batch vec; /* Folios in the queue segment */ + u8 orders[PAGEVEC_SIZE]; /* Order of each folio */ + struct folio_queue *next; /* Next queue segment or NULL */ + struct folio_queue *prev; /* Previous queue segment of NULL */ + unsigned long marks; /* 1-bit mark per folio */ + unsigned long marks2; /* Second 1-bit mark per folio */ + unsigned long marks3; /* Third 1-bit mark per folio */ +#if PAGEVEC_SIZE > BITS_PER_LONG +#error marks is not big enough +#endif +}; + +static inline void folioq_init(struct folio_queue *folioq) +{ + folio_batch_init(&folioq->vec); + folioq->next = NULL; + folioq->prev = NULL; + folioq->marks = 0; + folioq->marks2 = 0; + folioq->marks3 = 0; +} + +static inline unsigned int folioq_nr_slots(const struct folio_queue *folioq) +{ + return PAGEVEC_SIZE; +} + +static inline unsigned int folioq_count(struct folio_queue *folioq) +{ + return folio_batch_count(&folioq->vec); +} + +static inline bool folioq_full(struct folio_queue *folioq) +{ + //return !folio_batch_space(&folioq->vec); + return folioq_count(folioq) >= folioq_nr_slots(folioq); +} + +static inline bool folioq_is_marked(const struct folio_queue *folioq, unsigned int slot) +{ + return test_bit(slot, &folioq->marks); +} + +static inline void folioq_mark(struct folio_queue *folioq, unsigned int slot) +{ + set_bit(slot, &folioq->marks); +} + +static inline void folioq_unmark(struct folio_queue *folioq, unsigned int slot) +{ + clear_bit(slot, &folioq->marks); +} + +static inline bool folioq_is_marked2(const struct folio_queue *folioq, unsigned int slot) +{ + return test_bit(slot, &folioq->marks2); +} + +static inline void folioq_mark2(struct folio_queue *folioq, unsigned int slot) +{ + set_bit(slot, &folioq->marks2); +} + +static inline void folioq_unmark2(struct folio_queue *folioq, unsigned int slot) +{ + clear_bit(slot, &folioq->marks2); +} + +static inline bool folioq_is_marked3(const struct folio_queue *folioq, unsigned int slot) +{ + return test_bit(slot, &folioq->marks3); +} + +static inline void folioq_mark3(struct folio_queue *folioq, unsigned int slot) +{ + set_bit(slot, &folioq->marks3); +} + +static inline void folioq_unmark3(struct folio_queue *folioq, unsigned int slot) +{ + clear_bit(slot, &folioq->marks3); +} + +static inline unsigned int __folio_order(struct folio *folio) +{ + if (!folio_test_large(folio)) + return 0; + return folio->_flags_1 & 0xff; +} + +static inline unsigned int folioq_append(struct folio_queue *folioq, struct folio *folio) +{ + unsigned int slot = folioq->vec.nr++; + + folioq->vec.folios[slot] = folio; + folioq->orders[slot] = __folio_order(folio); + return slot; +} + +static inline unsigned int folioq_append_mark(struct folio_queue *folioq, struct folio *folio) +{ + unsigned int slot = folioq->vec.nr++; + + folioq->vec.folios[slot] = folio; + folioq->orders[slot] = __folio_order(folio); + folioq_mark(folioq, slot); + return slot; +} + +static inline struct folio *folioq_folio(const struct folio_queue *folioq, unsigned int slot) +{ + return folioq->vec.folios[slot]; +} + +static inline unsigned int folioq_folio_order(const struct folio_queue *folioq, unsigned int slot) +{ + return folioq->orders[slot]; +} + +static inline size_t folioq_folio_size(const struct folio_queue *folioq, unsigned int slot) +{ + return PAGE_SIZE << folioq_folio_order(folioq, slot); +} + +static inline void folioq_clear(struct folio_queue *folioq, unsigned int slot) +{ + folioq->vec.folios[slot] = NULL; + folioq_unmark(folioq, slot); + folioq_unmark2(folioq, 
slot); + folioq_unmark3(folioq, slot); +} + +#endif /* _LINUX_FOLIO_QUEUE_H */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 6ca11e241a24..eae5b67e4a15 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -146,8 +146,7 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* Expect random access pattern */ #define FMODE_RANDOM ((__force fmode_t)(1 << 12)) -/* File is huge (eg. /dev/mem): treat loff_t as unsigned */ -#define FMODE_UNSIGNED_OFFSET ((__force fmode_t)(1 << 13)) +/* FMODE_* bit 13 */ /* File is opened with O_PATH; almost nothing can be done with it */ #define FMODE_PATH ((__force fmode_t)(1 << 14)) @@ -409,10 +408,10 @@ struct address_space_operations { int (*write_begin)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, - struct page **pagep, void **fsdata); + struct folio **foliop, void **fsdata); int (*write_end)(struct file *, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, - struct page *page, void *fsdata); + struct folio *folio, void *fsdata); /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ sector_t (*bmap)(struct address_space *, sector_t); @@ -683,7 +682,8 @@ struct inode { #endif /* Misc */ - unsigned long i_state; + u32 i_state; + /* 32-bit hole */ struct rw_semaphore i_rwsem; unsigned long dirtied_when; /* jiffies of first dirtying */ @@ -746,6 +746,21 @@ struct inode { void *i_private; /* fs or device private pointer */ } __randomize_layout; +/* + * Get bit address from inode->i_state to use with wait_var_event() + * infrastructre. + */ +#define inode_state_wait_address(inode, bit) ((char *)&(inode)->i_state + (bit)) + +struct wait_queue_head *inode_bit_waitqueue(struct wait_bit_queue_entry *wqe, + struct inode *inode, u32 bit); + +static inline void inode_wake_up_bit(struct inode *inode, u32 bit) +{ + /* Caller is responsible for correct memory barriers. */ + wake_up_var(inode_state_wait_address(inode, bit)); +} + struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode); static inline unsigned int i_blocksize(const struct inode *node) @@ -948,6 +963,7 @@ static inline unsigned imajor(const struct inode *inode) } struct fown_struct { + struct file *file; /* backpointer for security modules */ rwlock_t lock; /* protects pid, uid, euid fields */ struct pid *pid; /* pid or -pgrp where SIGIO should be sent */ enum pid_type pid_type; /* Kind of process group SIGIO should be sent to */ @@ -987,52 +1003,69 @@ static inline int ra_has_index(struct file_ra_state *ra, pgoff_t index) index < ra->start + ra->size); } -/* - * f_{lock,count,pos_lock} members can be highly contended and share - * the same cacheline. f_{lock,mode} are very frequently used together - * and so share the same cacheline as well. The read-mostly - * f_{path,inode,op} are kept on a separate cacheline. +/** + * struct file - Represents a file + * @f_count: reference count + * @f_lock: Protects f_ep, f_flags. Must not be taken from IRQ context. + * @f_mode: FMODE_* flags often used in hotpaths + * @f_op: file operations + * @f_mapping: Contents of a cacheable, mappable object. 
+ * @private_data: filesystem or driver specific data + * @f_inode: cached inode + * @f_flags: file flags + * @f_iocb_flags: iocb flags + * @f_cred: stashed credentials of creator/opener + * @f_path: path of the file + * @f_pos_lock: lock protecting file position + * @f_pipe: specific to pipes + * @f_pos: file position + * @f_security: LSM security context of this file + * @f_owner: file owner + * @f_wb_err: writeback error + * @f_sb_err: per sb writeback errors + * @f_ep: link of all epoll hooks for this file + * @f_task_work: task work entry point + * @f_llist: work queue entrypoint + * @f_ra: file's readahead state + * @f_freeptr: Pointer used by SLAB_TYPESAFE_BY_RCU file cache (don't touch.) */ struct file { + atomic_long_t f_count; + spinlock_t f_lock; + fmode_t f_mode; + const struct file_operations *f_op; + struct address_space *f_mapping; + void *private_data; + struct inode *f_inode; + unsigned int f_flags; + unsigned int f_iocb_flags; + const struct cred *f_cred; + /* --- cacheline 1 boundary (64 bytes) --- */ + struct path f_path; union { - /* fput() uses task work when closing and freeing file (default). */ - struct callback_head f_task_work; - /* fput() must use workqueue (most kernel threads). */ - struct llist_node f_llist; - unsigned int f_iocb_flags; + /* regular files (with FMODE_ATOMIC_POS) and directories */ + struct mutex f_pos_lock; + /* pipes */ + u64 f_pipe; }; - - /* - * Protects f_ep, f_flags. - * Must not be taken from IRQ context. - */ - spinlock_t f_lock; - fmode_t f_mode; - atomic_long_t f_count; - struct mutex f_pos_lock; - loff_t f_pos; - unsigned int f_flags; - struct fown_struct f_owner; - const struct cred *f_cred; - struct file_ra_state f_ra; - struct path f_path; - struct inode *f_inode; /* cached value */ - const struct file_operations *f_op; - - u64 f_version; + loff_t f_pos; #ifdef CONFIG_SECURITY - void *f_security; + void *f_security; #endif - /* needed for tty driver, and maybe others */ - void *private_data; - + /* --- cacheline 2 boundary (128 bytes) --- */ + struct fown_struct *f_owner; + errseq_t f_wb_err; + errseq_t f_sb_err; #ifdef CONFIG_EPOLL - /* Used by fs/eventpoll.c to link all the hooks to this file */ - struct hlist_head *f_ep; -#endif /* #ifdef CONFIG_EPOLL */ - struct address_space *f_mapping; - errseq_t f_wb_err; - errseq_t f_sb_err; /* for syncfs */ + struct hlist_head *f_ep; +#endif + union { + struct callback_head f_task_work; + struct llist_node f_llist; + struct file_ra_state f_ra; + freeptr_t f_freeptr; + }; + /* --- cacheline 3 boundary (192 bytes) --- */ } __randomize_layout __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ @@ -1077,6 +1110,12 @@ struct file_lease; #define OFFT_OFFSET_MAX type_max(off_t) #endif +int file_f_owner_allocate(struct file *file); +static inline struct fown_struct *file_f_owner(const struct file *file) +{ + return READ_ONCE(file->f_owner); +} + extern void send_sigio(struct fown_struct *fown, int fd, int band); static inline struct inode *file_inode(const struct file *f) @@ -1125,7 +1164,7 @@ extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force extern int f_setown(struct file *filp, int who, int force); extern void f_delown(struct file *filp); extern pid_t f_getown(struct file *filp); -extern int send_sigurg(struct fown_struct *fown); +extern int send_sigurg(struct file *file); /* * sb->s_flags. 
Note that these mirror the equivalent MS_* flags where @@ -1190,6 +1229,7 @@ extern int send_sigurg(struct fown_struct *fown); #define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */ #define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */ #define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */ +#define SB_I_NOIDMAP 0x00002000 /* No idmapped mounts on this superblock */ /* Possible states of 'frozen' field */ enum { @@ -1268,7 +1308,7 @@ struct super_block { time64_t s_time_min; time64_t s_time_max; #ifdef CONFIG_FSNOTIFY - __u32 s_fsnotify_mask; + u32 s_fsnotify_mask; struct fsnotify_sb_info *s_fsnotify_info; #endif @@ -1684,7 +1724,7 @@ static inline bool __sb_start_write_trylock(struct super_block *sb, int level) #define __sb_writers_acquired(sb, lev) \ percpu_rwsem_acquire(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) #define __sb_writers_release(sb, lev) \ - percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], 1, _THIS_IP_) + percpu_rwsem_release(&(sb)->s_writers.rw_sem[(lev)-1], _THIS_IP_) /** * __sb_write_started - check if sb freeze level is held @@ -2074,6 +2114,8 @@ struct file_operations { #define FOP_DIO_PARALLEL_WRITE ((__force fop_flags_t)(1 << 3)) /* Contains huge pages */ #define FOP_HUGE_PAGES ((__force fop_flags_t)(1 << 4)) +/* Treat loff_t as unsigned (e.g., /dev/mem) */ +#define FOP_UNSIGNED_OFFSET ((__force fop_flags_t)(1 << 5)) /* Wrap a directory iterator that needs exclusive inode access */ int wrap_directory_iterator(struct file *, struct dir_context *, @@ -2373,8 +2415,6 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * * I_REFERENCED Marks the inode as recently references on the LRU list. * - * I_DIO_WAKEUP Never set. Only used as a key for wait_on_bit(). - * * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to * synchronize competing switching instances and to tell * wb stat updates to grab the i_pages lock. See @@ -2397,30 +2437,32 @@ static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src, * i_count. * * Q: What is the difference between I_WILL_FREE and I_FREEING? + * + * __I_{SYNC,NEW,LRU_ISOLATING} are used to derive unique addresses to wait + * upon. There's one free address left. 
*/ -#define I_DIRTY_SYNC (1 << 0) -#define I_DIRTY_DATASYNC (1 << 1) -#define I_DIRTY_PAGES (1 << 2) -#define __I_NEW 3 +#define __I_NEW 0 #define I_NEW (1 << __I_NEW) -#define I_WILL_FREE (1 << 4) -#define I_FREEING (1 << 5) -#define I_CLEAR (1 << 6) -#define __I_SYNC 7 +#define __I_SYNC 1 #define I_SYNC (1 << __I_SYNC) -#define I_REFERENCED (1 << 8) -#define __I_DIO_WAKEUP 9 -#define I_DIO_WAKEUP (1 << __I_DIO_WAKEUP) +#define __I_LRU_ISOLATING 2 +#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING) + +#define I_DIRTY_SYNC (1 << 3) +#define I_DIRTY_DATASYNC (1 << 4) +#define I_DIRTY_PAGES (1 << 5) +#define I_WILL_FREE (1 << 6) +#define I_FREEING (1 << 7) +#define I_CLEAR (1 << 8) +#define I_REFERENCED (1 << 9) #define I_LINKABLE (1 << 10) #define I_DIRTY_TIME (1 << 11) -#define I_WB_SWITCH (1 << 13) -#define I_OVL_INUSE (1 << 14) -#define I_CREATING (1 << 15) -#define I_DONTCACHE (1 << 16) -#define I_SYNC_QUEUED (1 << 17) -#define I_PINNING_NETFS_WB (1 << 18) -#define __I_LRU_ISOLATING 19 -#define I_LRU_ISOLATING (1 << __I_LRU_ISOLATING) +#define I_WB_SWITCH (1 << 12) +#define I_OVL_INUSE (1 << 13) +#define I_CREATING (1 << 14) +#define I_DONTCACHE (1 << 15) +#define I_SYNC_QUEUED (1 << 16) +#define I_PINNING_NETFS_WB (1 << 17) #define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC) #define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES) @@ -2554,10 +2596,17 @@ struct super_block *sget(struct file_system_type *type, struct super_block *sget_dev(struct fs_context *fc, dev_t dev); /* Alas, no aliases. Too much hassle with bringing module.h everywhere */ -#define fops_get(fops) \ - (((fops) && try_module_get((fops)->owner) ? (fops) : NULL)) -#define fops_put(fops) \ - do { if (fops) module_put((fops)->owner); } while(0) +#define fops_get(fops) ({ \ + const struct file_operations *_fops = (fops); \ + (((_fops) && try_module_get((_fops)->owner) ? (_fops) : NULL)); \ +}) + +#define fops_put(fops) ({ \ + const struct file_operations *_fops = (fops); \ + if (_fops) \ + module_put((_fops)->owner); \ +}) + /* * This one is to be used *ONLY* from ->open() instances. 
* fops must be non-NULL, pinned down *and* module dependencies @@ -3100,7 +3149,14 @@ static inline bool is_zero_ino(ino_t ino) return (u32)ino == 0; } -extern void __iget(struct inode * inode); +/* + * inode->i_lock must be held + */ +static inline void __iget(struct inode *inode) +{ + atomic_inc(&inode->i_count); +} + extern void iget_failed(struct inode *); extern void clear_inode(struct inode *); extern void __destroy_inode(struct inode *); @@ -3183,6 +3239,8 @@ extern loff_t vfs_setpos(struct file *file, loff_t offset, loff_t maxsize); extern loff_t generic_file_llseek(struct file *file, loff_t offset, int whence); extern loff_t generic_file_llseek_size(struct file *file, loff_t offset, int whence, loff_t maxsize, loff_t eof); +loff_t generic_llseek_cookie(struct file *file, loff_t offset, int whence, + u64 *cookie); extern loff_t fixed_size_llseek(struct file *file, loff_t offset, int whence, loff_t size); extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); @@ -3220,7 +3278,9 @@ static inline ssize_t blockdev_direct_IO(struct kiocb *iocb, } #endif +bool inode_dio_finished(const struct inode *inode); void inode_dio_wait(struct inode *inode); +void inode_dio_wait_interruptible(struct inode *inode); /** * inode_dio_begin - signal start of a direct I/O requests @@ -3244,7 +3304,7 @@ static inline void inode_dio_begin(struct inode *inode) static inline void inode_dio_end(struct inode *inode) { if (atomic_dec_and_test(&inode->i_dio_count)) - wake_up_bit(&inode->i_state, __I_DIO_WAKEUP); + wake_up_var(&inode->i_dio_count); } extern void inode_set_flags(struct inode *inode, unsigned int flags, @@ -3337,7 +3397,7 @@ extern ssize_t noop_direct_IO(struct kiocb *iocb, struct iov_iter *iter); extern int simple_empty(struct dentry *); extern int simple_write_begin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, - struct page **pagep, void **fsdata); + struct folio **foliop, void **fsdata); extern const struct address_space_operations ram_aops; extern int always_delete_dentry(const struct dentry *); extern struct inode *alloc_anon_inode(struct super_block *); @@ -3461,7 +3521,6 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags, if (flags & RWF_NOWAIT) { if (!(ki->ki_filp->f_mode & FMODE_NOWAIT)) return -EOPNOTSUPP; - kiocb_flags |= IOCB_NOIO; } if (flags & RWF_ATOMIC) { if (rw_type != WRITE) diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h index f3512fddf3d7..5b51c3d582d6 100644 --- a/include/linux/generic-radix-tree.h +++ b/include/linux/generic-radix-tree.h @@ -41,6 +41,7 @@ #include <linux/limits.h> #include <linux/log2.h> #include <linux/math.h> +#include <linux/slab.h> #include <linux/types.h> struct genradix_root; @@ -48,10 +49,63 @@ struct genradix_root; #define GENRADIX_NODE_SHIFT 9 #define GENRADIX_NODE_SIZE (1U << GENRADIX_NODE_SHIFT) +#define GENRADIX_ARY (GENRADIX_NODE_SIZE / sizeof(struct genradix_node *)) +#define GENRADIX_ARY_SHIFT ilog2(GENRADIX_ARY) + +/* depth that's needed for a genradix that can address up to ULONG_MAX: */ +#define GENRADIX_MAX_DEPTH \ + DIV_ROUND_UP(BITS_PER_LONG - GENRADIX_NODE_SHIFT, GENRADIX_ARY_SHIFT) + +#define GENRADIX_DEPTH_MASK \ + ((unsigned long) (roundup_pow_of_two(GENRADIX_MAX_DEPTH + 1) - 1)) + +static inline int genradix_depth_shift(unsigned depth) +{ + return GENRADIX_NODE_SHIFT + GENRADIX_ARY_SHIFT * depth; +} + +/* + * Returns size (of data, in bytes) that a tree of a given depth holds: + */ +static inline size_t 
genradix_depth_size(unsigned depth) +{ + return 1UL << genradix_depth_shift(depth); +} + +static inline unsigned genradix_root_to_depth(struct genradix_root *r) +{ + return (unsigned long) r & GENRADIX_DEPTH_MASK; +} + +static inline struct genradix_node *genradix_root_to_node(struct genradix_root *r) +{ + return (void *) ((unsigned long) r & ~GENRADIX_DEPTH_MASK); +} + struct __genradix { struct genradix_root *root; }; +struct genradix_node { + union { + /* Interior node: */ + struct genradix_node *children[GENRADIX_ARY]; + + /* Leaf: */ + u8 data[GENRADIX_NODE_SIZE]; + }; +}; + +static inline struct genradix_node *genradix_alloc_node(gfp_t gfp_mask) +{ + return kzalloc(GENRADIX_NODE_SIZE, gfp_mask); +} + +static inline void genradix_free_node(struct genradix_node *node) +{ + kfree(node); +} + /* * NOTE: currently, sizeof(_type) must not be larger than GENRADIX_NODE_SIZE: */ @@ -128,6 +182,30 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size) #define __genradix_idx_to_offset(_radix, _idx) \ __idx_to_offset(_idx, __genradix_obj_size(_radix)) +static inline void *__genradix_ptr_inlined(struct __genradix *radix, size_t offset) +{ + struct genradix_root *r = READ_ONCE(radix->root); + struct genradix_node *n = genradix_root_to_node(r); + unsigned level = genradix_root_to_depth(r); + unsigned shift = genradix_depth_shift(level); + + if (unlikely(ilog2(offset) >= genradix_depth_shift(level))) + return NULL; + + while (n && shift > GENRADIX_NODE_SHIFT) { + shift -= GENRADIX_ARY_SHIFT; + n = n->children[offset >> shift]; + offset &= (1UL << shift) - 1; + } + + return n ? &n->data[offset] : NULL; +} + +#define genradix_ptr_inlined(_radix, _idx) \ + (__genradix_cast(_radix) \ + __genradix_ptr_inlined(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx))) + void *__genradix_ptr(struct __genradix *, size_t); /** @@ -142,7 +220,24 @@ void *__genradix_ptr(struct __genradix *, size_t); __genradix_ptr(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx))) -void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t); +void *__genradix_ptr_alloc(struct __genradix *, size_t, + struct genradix_node **, gfp_t); + +#define genradix_ptr_alloc_inlined(_radix, _idx, _gfp) \ + (__genradix_cast(_radix) \ + (__genradix_ptr_inlined(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx)) ?: \ + __genradix_ptr_alloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx), \ + NULL, _gfp))) + +#define genradix_ptr_alloc_preallocated_inlined(_radix, _idx, _new_node, _gfp)\ + (__genradix_cast(_radix) \ + (__genradix_ptr_inlined(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx)) ?: \ + __genradix_ptr_alloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx), \ + _new_node, _gfp))) /** * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it @@ -157,7 +252,13 @@ void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t); (__genradix_cast(_radix) \ __genradix_ptr_alloc(&(_radix)->tree, \ __genradix_idx_to_offset(_radix, _idx), \ - _gfp)) + NULL, _gfp)) + +#define genradix_ptr_alloc_preallocated(_radix, _idx, _new_node, _gfp)\ + (__genradix_cast(_radix) \ + __genradix_ptr_alloc(&(_radix)->tree, \ + __genradix_idx_to_offset(_radix, _idx), \ + _new_node, _gfp)) struct genradix_iter { size_t offset; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index f53f76e0b17e..a951de920e20 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -319,7 +319,7 @@ static inline struct page *alloc_pages_mpol_noprof(gfp_t gfp, unsigned int order } static 
inline struct folio *folio_alloc_noprof(gfp_t gfp, unsigned int order) { - return __folio_alloc_node(gfp, order, numa_node_id()); + return __folio_alloc_node_noprof(gfp, order, numa_node_id()); } static inline struct folio *folio_alloc_mpol_noprof(gfp_t gfp, unsigned int order, struct mempolicy *mpol, pgoff_t ilx, int nid) @@ -446,4 +446,27 @@ extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_ #endif void free_contig_range(unsigned long pfn, unsigned long nr_pages); +#ifdef CONFIG_CONTIG_ALLOC +static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp, + int nid, nodemask_t *node) +{ + struct page *page; + + if (WARN_ON(!order || !(gfp & __GFP_COMP))) + return NULL; + + page = alloc_contig_pages_noprof(1 << order, gfp, nid, node); + + return page ? page_folio(page) : NULL; +} +#else +static inline struct folio *folio_alloc_gigantic_noprof(int order, gfp_t gfp, + int nid, nodemask_t *node) +{ + return NULL; +} +#endif +/* This should be paired with folio_put() rather than free_contig_range(). */ +#define folio_alloc_gigantic(...) alloc_hooks(folio_alloc_gigantic_noprof(__VA_ARGS__)) + #endif /* __LINUX_GFP_H */ diff --git a/include/linux/gfp_types.h b/include/linux/gfp_types.h index 313be4ad79fd..65db9349f905 100644 --- a/include/linux/gfp_types.h +++ b/include/linux/gfp_types.h @@ -215,7 +215,8 @@ enum { * the caller still has to check for failures) while costly requests try to be * not disruptive and back off even without invoking the OOM killer. * The following three modifiers might be used to override some of these - * implicit rules. + * implicit rules. Please note that all of them must be used along with + * %__GFP_DIRECT_RECLAIM flag. * * %__GFP_NORETRY: The VM implementation will try only very lightweight * memory direct reclaim to get some memory under memory pressure (thus @@ -246,11 +247,14 @@ enum { * cannot handle allocation failures. The allocation could block * indefinitely but will never return with failure. Testing for * failure is pointless. + * It _must_ be blockable and used together with __GFP_DIRECT_RECLAIM. + * It should _never_ be used in non-sleepable contexts. * New users should be evaluated carefully (and the flag should be * used only when there is no reasonable failure policy) but it is * definitely preferable to use the flag rather than opencode endless * loop around allocator. - * Using this flag for costly allocations is _highly_ discouraged. + * Allocating pages from the buddy with __GFP_NOFAIL and order > 1 is + * not supported. Please consider using kvmalloc() instead. 
*/ #define __GFP_IO ((__force gfp_t)___GFP_IO) #define __GFP_FS ((__force gfp_t)___GFP_FS) diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 063f71b18a7c..2d105be7bbc3 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h @@ -17,15 +17,9 @@ struct device; /* make these flag values available regardless of GPIO kconfig options */ -#define GPIOF_DIR_OUT (0 << 0) -#define GPIOF_DIR_IN (1 << 0) - -#define GPIOF_INIT_LOW (0 << 1) -#define GPIOF_INIT_HIGH (1 << 1) - -#define GPIOF_IN (GPIOF_DIR_IN) -#define GPIOF_OUT_INIT_LOW (GPIOF_DIR_OUT | GPIOF_INIT_LOW) -#define GPIOF_OUT_INIT_HIGH (GPIOF_DIR_OUT | GPIOF_INIT_HIGH) +#define GPIOF_IN ((1 << 0)) +#define GPIOF_OUT_INIT_LOW ((0 << 0) | (0 << 1)) +#define GPIOF_OUT_INIT_HIGH ((0 << 0) | (1 << 1)) /* Gpio pin is active-low */ #define GPIOF_ACTIVE_LOW (1 << 2) diff --git a/include/linux/hid.h b/include/linux/hid.h index 1533c9dcd3a6..121d5b8bc867 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -46,7 +46,7 @@ struct hid_item { __s16 s16; __u32 u32; __s32 s32; - __u8 *longdata; + const __u8 *longdata; } data; }; @@ -600,9 +600,9 @@ struct hid_driver; struct hid_ll_driver; struct hid_device { /* device report descriptor */ - __u8 *dev_rdesc; + const __u8 *dev_rdesc; unsigned dev_rsize; - __u8 *rdesc; + const __u8 *rdesc; unsigned rsize; struct hid_collection *collection; /* List of HID collections */ unsigned collection_size; /* Number of allocated hid_collections */ @@ -822,7 +822,7 @@ struct hid_driver { struct hid_usage *usage, __s32 value); void (*report)(struct hid_device *hdev, struct hid_report *report); - __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, + const __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, unsigned int *size); int (*input_mapping)(struct hid_device *hdev, @@ -940,6 +940,8 @@ extern void hidinput_report_event(struct hid_device *hid, struct hid_report *rep extern int hidinput_connect(struct hid_device *hid, unsigned int force); extern void hidinput_disconnect(struct hid_device *); +struct hid_field *hid_find_field(struct hid_device *hdev, unsigned int report_type, + unsigned int application, unsigned int usage); int hid_set_field(struct hid_field *, unsigned, __s32); int hid_input_report(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 size, int interrupt); @@ -953,7 +955,7 @@ struct hid_device *hid_allocate_device(void); struct hid_report *hid_register_report(struct hid_device *device, enum hid_report_type type, unsigned int id, unsigned int application); -int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); +int hid_parse_report(struct hid_device *hid, const __u8 *start, unsigned size); struct hid_report *hid_validate_values(struct hid_device *hid, enum hid_report_type type, unsigned int id, unsigned int field_index, diff --git a/include/linux/hid_bpf.h b/include/linux/hid_bpf.h index d4d063cf63b5..6a47223e6460 100644 --- a/include/linux/hid_bpf.h +++ b/include/linux/hid_bpf.h @@ -212,7 +212,7 @@ int hid_bpf_connect_device(struct hid_device *hdev); void hid_bpf_disconnect_device(struct hid_device *hdev); void hid_bpf_destroy_device(struct hid_device *hid); int hid_bpf_device_init(struct hid_device *hid); -u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, u8 *rdesc, unsigned int *size); +u8 *call_hid_bpf_rdesc_fixup(struct hid_device *hdev, const u8 *rdesc, unsigned int *size); #else /* CONFIG_HID_BPF */ static inline u8 *dispatch_hid_bpf_device_event(struct hid_device *hid, enum hid_report_type type, u8 *data, u32 *size, int 
interrupt, diff --git a/include/linux/hidraw.h b/include/linux/hidraw.h index cd67f4ca5599..18fd30a288de 100644 --- a/include/linux/hidraw.h +++ b/include/linux/hidraw.h @@ -32,6 +32,7 @@ struct hidraw_list { struct hidraw *hidraw; struct list_head node; struct mutex read_mutex; + bool revoked; }; #ifdef CONFIG_HIDRAW diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index e25d9ebfdf89..67d0ab3c3bba 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -76,9 +76,9 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; /* * Mask of all large folio orders supported for file THP. Folios in a DAX * file is never split and the MAX_PAGECACHE_ORDER limit does not apply to - * it. + * it. Same to PFNMAPs where there's neither page* nor pagecache. */ -#define THP_ORDERS_ALL_FILE_DAX \ +#define THP_ORDERS_ALL_SPECIAL \ (BIT(PMD_ORDER) | BIT(PUD_ORDER)) #define THP_ORDERS_ALL_FILE_DEFAULT \ ((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0)) @@ -87,7 +87,7 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; * Mask of all large folio orders supported for THP. */ #define THP_ORDERS_ALL \ - (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_FILE_DAX | THP_ORDERS_ALL_FILE_DEFAULT) + (THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT) #define TVA_SMAPS (1 << 0) /* Will be used for procfs */ #define TVA_IN_PF (1 << 1) /* Page fault handler */ @@ -96,6 +96,8 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; #define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \ (!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order))) +#define split_folio(f) split_folio_to_list(f, NULL) + #ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES #define HPAGE_PMD_SHIFT PMD_SHIFT #define HPAGE_PUD_SHIFT PUD_SHIFT @@ -114,6 +116,53 @@ extern struct kobj_attribute thpsize_shmem_enabled_attr; #define HPAGE_PUD_MASK (~(HPAGE_PUD_SIZE - 1)) #define HPAGE_PUD_SIZE ((1UL) << HPAGE_PUD_SHIFT) +enum mthp_stat_item { + MTHP_STAT_ANON_FAULT_ALLOC, + MTHP_STAT_ANON_FAULT_FALLBACK, + MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, + MTHP_STAT_SWPOUT, + MTHP_STAT_SWPOUT_FALLBACK, + MTHP_STAT_SHMEM_ALLOC, + MTHP_STAT_SHMEM_FALLBACK, + MTHP_STAT_SHMEM_FALLBACK_CHARGE, + MTHP_STAT_SPLIT, + MTHP_STAT_SPLIT_FAILED, + MTHP_STAT_SPLIT_DEFERRED, + MTHP_STAT_NR_ANON, + MTHP_STAT_NR_ANON_PARTIALLY_MAPPED, + __MTHP_STAT_COUNT +}; + +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS) +struct mthp_stat { + unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; +}; + +DECLARE_PER_CPU(struct mthp_stat, mthp_stats); + +static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta) +{ + if (order <= 0 || order > PMD_ORDER) + return; + + this_cpu_add(mthp_stats.stats[order][item], delta); +} + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ + mod_mthp_stat(order, item, 1); +} + +#else +static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta) +{ +} + +static inline void count_mthp_stat(int order, enum mthp_stat_item item) +{ +} +#endif + #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern unsigned long transparent_hugepage_flags; @@ -269,41 +318,6 @@ struct thpsize { #define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj) -enum mthp_stat_item { - MTHP_STAT_ANON_FAULT_ALLOC, - MTHP_STAT_ANON_FAULT_FALLBACK, - MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE, - MTHP_STAT_SWPOUT, - MTHP_STAT_SWPOUT_FALLBACK, - MTHP_STAT_SHMEM_ALLOC, - MTHP_STAT_SHMEM_FALLBACK, - MTHP_STAT_SHMEM_FALLBACK_CHARGE, - MTHP_STAT_SPLIT, - 
MTHP_STAT_SPLIT_FAILED, - MTHP_STAT_SPLIT_DEFERRED, - __MTHP_STAT_COUNT -}; - -struct mthp_stat { - unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT]; -}; - -#ifdef CONFIG_SYSFS -DECLARE_PER_CPU(struct mthp_stat, mthp_stats); - -static inline void count_mthp_stat(int order, enum mthp_stat_item item) -{ - if (order <= 0 || order > PMD_ORDER) - return; - - this_cpu_inc(mthp_stats.stats[order][item]); -} -#else -static inline void count_mthp_stat(int order, enum mthp_stat_item item) -{ -} -#endif - #define transparent_hugepage_use_zero_page() \ (transparent_hugepage_flags & \ (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) @@ -314,14 +328,29 @@ unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long add unsigned long len, unsigned long pgoff, unsigned long flags, vm_flags_t vm_flags); -bool can_split_folio(struct folio *folio, int *pextra_pins); +bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins); int split_huge_page_to_list_to_order(struct page *page, struct list_head *list, unsigned int new_order); +int min_order_for_split(struct folio *folio); +int split_folio_to_list(struct folio *folio, struct list_head *list); static inline int split_huge_page(struct page *page) { - return split_huge_page_to_list_to_order(page, NULL, 0); + struct folio *folio = page_folio(page); + int ret = min_order_for_split(folio); + + if (ret < 0) + return ret; + + /* + * split_huge_page() locks the page before splitting and + * expects the same page that has been split to be locked when + * returned. split_folio(page_folio(page)) cannot be used here + * because it converts the page to folio and passes the head + * page to be split. + */ + return split_huge_page_to_list_to_order(page, NULL, ret); } -void deferred_split_folio(struct folio *folio); +void deferred_split_folio(struct folio *folio, bool partially_mapped); void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long address, bool freeze, struct folio *folio); @@ -342,6 +371,17 @@ void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address, void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, unsigned long address); +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, + pud_t *pudp, unsigned long addr, pgprot_t newprot, + unsigned long cp_flags); +#else +static inline int +change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, + pud_t *pudp, unsigned long addr, pgprot_t newprot, + unsigned long cp_flags) { return 0; } +#endif + #define split_huge_pud(__vma, __pud, __address) \ do { \ pud_t *____pud = (__pud); \ @@ -410,11 +450,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); } -static inline bool is_huge_zero_pud(pud_t pud) -{ - return false; -} - struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); void mm_put_huge_zero_folio(struct mm_struct *mm); @@ -470,7 +505,7 @@ thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, } static inline bool -can_split_folio(struct folio *folio, int *pextra_pins) +can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins) { return false; } @@ -484,7 +519,13 @@ static inline int split_huge_page(struct page *page) { return 0; } -static inline void deferred_split_folio(struct folio *folio) {} + +static inline int split_folio_to_list(struct folio *folio, struct list_head *list) +{ + return 0; +} + +static inline void 
deferred_split_folio(struct folio *folio, bool partially_mapped) {} #define split_huge_pmd(__vma, __pmd, __address) \ do { } while (0) @@ -555,11 +596,6 @@ static inline bool is_huge_zero_pmd(pmd_t pmd) return false; } -static inline bool is_huge_zero_pud(pud_t pud) -{ - return false; -} - static inline void mm_put_huge_zero_folio(struct mm_struct *mm) { return; @@ -585,6 +621,19 @@ static inline int next_order(unsigned long *orders, int prev) { return 0; } + +static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud, + unsigned long address) +{ +} + +static inline int change_huge_pud(struct mmu_gather *tlb, + struct vm_area_struct *vma, pud_t *pudp, + unsigned long addr, pgprot_t newprot, + unsigned long cp_flags) +{ + return 0; +} #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static inline int split_folio_to_list_to_order(struct folio *folio, @@ -598,7 +647,4 @@ static inline int split_folio_to_order(struct folio *folio, int new_order) return split_folio_to_list_to_order(folio, NULL, new_order); } -#define split_folio_to_list(f, l) split_folio_to_list_to_order(f, l, 0) -#define split_folio(f) split_folio_to_order(f, 0) - #endif /* _LINUX_HUGE_MM_H */ diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 45bf05ad5c53..98c47c394b89 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -127,9 +127,6 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma, unsigned long len); int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *, struct vm_area_struct *); -struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, - unsigned long address, unsigned int flags, - unsigned int *page_mask); void unmap_hugepage_range(struct vm_area_struct *, unsigned long, unsigned long, struct page *, zap_flags_t); @@ -899,10 +896,11 @@ static inline bool hugepage_movable_supported(struct hstate *h) /* Movability of hugepages depends on migration support. */ static inline gfp_t htlb_alloc_mask(struct hstate *h) { - if (hugepage_movable_supported(h)) - return GFP_HIGHUSER_MOVABLE; - else - return GFP_HIGHUSER; + gfp_t gfp = __GFP_COMP | __GFP_NOWARN; + + gfp |= hugepage_movable_supported(h) ? 
GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER; + + return gfp; } static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) @@ -1251,7 +1249,7 @@ static inline __init void hugetlb_cma_reserve(int order) } #endif -#ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE +#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING static inline bool hugetlb_pmd_shared(pte_t *pte) { return page_count(virt_to_page(pte)) > 1; @@ -1287,8 +1285,7 @@ bool __vma_private_lock(struct vm_area_struct *vma); static inline pte_t * hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz) { -#if defined(CONFIG_HUGETLB_PAGE) && \ - defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP) +#if defined(CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING) && defined(CONFIG_LOCKDEP) struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; /* diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h index e94314760aab..5c6a421ad580 100644 --- a/include/linux/hwmon.h +++ b/include/linux/hwmon.h @@ -481,7 +481,6 @@ devm_hwmon_device_register_with_info(struct device *dev, const struct attribute_group **extra_groups); void hwmon_device_unregister(struct device *dev); -void devm_hwmon_device_unregister(struct device *dev); int hwmon_notify_event(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel); diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 377def497298..388ce71a29a9 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h @@ -761,6 +761,9 @@ struct i2c_adapter { struct regulator *bus_regulator; struct dentry *debugfs; + + /* 7bit address space */ + DECLARE_BITMAP(addrs_in_instantiation, 1 << 7); }; #define to_i2c_adapter(d) container_of(d, struct i2c_adapter, dev) diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index 074f632868d9..2a1ed05d5782 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -278,6 +278,20 @@ enum i3c_bus_mode { }; /** + * enum i3c_open_drain_speed - I3C open-drain speed + * @I3C_OPEN_DRAIN_SLOW_SPEED: Slow open-drain speed for sending the first + * broadcast address. The first broadcast address at this speed + * will be visible to all devices on the I3C bus. I3C devices + * working in I2C mode will turn off their spike filter when + * switching into I3C mode. + * @I3C_OPEN_DRAIN_NORMAL_SPEED: Normal open-drain speed in I3C bus mode. + */ +enum i3c_open_drain_speed { + I3C_OPEN_DRAIN_SLOW_SPEED, + I3C_OPEN_DRAIN_NORMAL_SPEED, +}; + +/** * enum i3c_addr_slot_status - I3C address slot status * @I3C_ADDR_SLOT_FREE: address is free * @I3C_ADDR_SLOT_RSVD: address is reserved @@ -436,6 +450,7 @@ struct i3c_bus { * NULL. * @enable_hotjoin: enable hot join event detect. * @disable_hotjoin: disable hot join event detect. + * @set_speed: adjust I3C open drain mode timing. 
*/ struct i3c_master_controller_ops { int (*bus_init)(struct i3c_master_controller *master); @@ -464,6 +479,7 @@ struct i3c_master_controller_ops { struct i3c_ibi_slot *slot); int (*enable_hotjoin)(struct i3c_master_controller *master); int (*disable_hotjoin)(struct i3c_master_controller *master); + int (*set_speed)(struct i3c_master_controller *master, enum i3c_open_drain_speed speed); }; /** diff --git a/include/linux/i8253.h b/include/linux/i8253.h index 8336b2f6f834..56c280eb2d4f 100644 --- a/include/linux/i8253.h +++ b/include/linux/i8253.h @@ -21,9 +21,9 @@ #define PIT_LATCH ((PIT_TICK_RATE + HZ/2) / HZ) extern raw_spinlock_t i8253_lock; -extern bool i8253_clear_counter_on_shutdown; extern struct clock_event_device i8253_clockevent; extern void clockevent_i8253_init(bool oneshot); +extern void clockevent_i8253_disable(void); extern void setup_pit_timer(void); diff --git a/include/linux/if_rmnet.h b/include/linux/if_rmnet.h index 839d1e48b85e..c44bf6e80ecb 100644 --- a/include/linux/if_rmnet.h +++ b/include/linux/if_rmnet.h @@ -42,7 +42,7 @@ struct rmnet_map_ul_csum_header { /* csum_info field: * OFFSET: where (offset in bytes) to insert computed checksum - * UDP: 1 = UDP checksum (zero checkum means no checksum) + * UDP: 1 = UDP checksum (zero checksum means no checksum) * ENABLED: 1 = checksum computation requested */ #define MAP_CSUM_UL_OFFSET_MASK GENMASK(13, 0) diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h index b8d8d69eba29..90867f44ab4d 100644 --- a/include/linux/input/matrix_keypad.h +++ b/include/linux/input/matrix_keypad.h @@ -34,52 +34,6 @@ struct matrix_keymap_data { unsigned int keymap_size; }; -/** - * struct matrix_keypad_platform_data - platform-dependent keypad data - * @keymap_data: pointer to &matrix_keymap_data - * @row_gpios: pointer to array of gpio numbers representing rows - * @col_gpios: pointer to array of gpio numbers reporesenting colums - * @num_row_gpios: actual number of row gpios used by device - * @num_col_gpios: actual number of col gpios used by device - * @col_scan_delay_us: delay, measured in microseconds, that is - * needed before we can keypad after activating column gpio - * @debounce_ms: debounce interval in milliseconds - * @clustered_irq: may be specified if interrupts of all row/column GPIOs - * are bundled to one single irq - * @clustered_irq_flags: flags that are needed for the clustered irq - * @active_low: gpio polarity - * @wakeup: controls whether the device should be set up as wakeup - * source - * @no_autorepeat: disable key autorepeat - * @drive_inactive_cols: drive inactive columns during scan, rather than - * making them inputs. - * - * This structure represents platform-specific data that use used by - * matrix_keypad driver to perform proper initialization. 
- */ -struct matrix_keypad_platform_data { - const struct matrix_keymap_data *keymap_data; - - const unsigned int *row_gpios; - const unsigned int *col_gpios; - - unsigned int num_row_gpios; - unsigned int num_col_gpios; - - unsigned int col_scan_delay_us; - - /* key debounce interval in milli-second */ - unsigned int debounce_ms; - - unsigned int clustered_irq; - unsigned int clustered_irq_flags; - - bool active_low; - bool wakeup; - bool no_autorepeat; - bool drive_inactive_cols; -}; - int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, const char *keymap_name, unsigned int rows, unsigned int cols, @@ -88,6 +42,4 @@ int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data, int matrix_keypad_parse_properties(struct device *dev, unsigned int *rows, unsigned int *cols); -#define matrix_keypad_parse_of_params matrix_keypad_parse_properties - #endif /* _MATRIX_KEYPAD_H */ diff --git a/include/linux/intel_vsec.h b/include/linux/intel_vsec.h new file mode 100644 index 000000000000..11ee185566c3 --- /dev/null +++ b/include/linux/intel_vsec.h @@ -0,0 +1,149 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _INTEL_VSEC_H +#define _INTEL_VSEC_H + +#include <linux/auxiliary_bus.h> +#include <linux/bits.h> + +#define VSEC_CAP_TELEMETRY BIT(0) +#define VSEC_CAP_WATCHER BIT(1) +#define VSEC_CAP_CRASHLOG BIT(2) +#define VSEC_CAP_SDSI BIT(3) +#define VSEC_CAP_TPMI BIT(4) + +/* Intel DVSEC offsets */ +#define INTEL_DVSEC_ENTRIES 0xA +#define INTEL_DVSEC_SIZE 0xB +#define INTEL_DVSEC_TABLE 0xC +#define INTEL_DVSEC_TABLE_BAR(x) ((x) & GENMASK(2, 0)) +#define INTEL_DVSEC_TABLE_OFFSET(x) ((x) & GENMASK(31, 3)) +#define TABLE_OFFSET_SHIFT 3 + +struct pci_dev; +struct resource; + +enum intel_vsec_id { + VSEC_ID_TELEMETRY = 2, + VSEC_ID_WATCHER = 3, + VSEC_ID_CRASHLOG = 4, + VSEC_ID_SDSI = 65, + VSEC_ID_TPMI = 66, +}; + +/** + * struct intel_vsec_header - Common fields of Intel VSEC and DVSEC registers. + * @rev: Revision ID of the VSEC/DVSEC register space + * @length: Length of the VSEC/DVSEC register space + * @id: ID of the feature + * @num_entries: Number of instances of the feature + * @entry_size: Size of the discovery table for each feature + * @tbir: BAR containing the discovery tables + * @offset: BAR offset of start of the first discovery table + */ +struct intel_vsec_header { + u8 rev; + u16 length; + u16 id; + u8 num_entries; + u8 entry_size; + u8 tbir; + u32 offset; +}; + +enum intel_vsec_quirks { + /* Watcher feature not supported */ + VSEC_QUIRK_NO_WATCHER = BIT(0), + + /* Crashlog feature not supported */ + VSEC_QUIRK_NO_CRASHLOG = BIT(1), + + /* Use shift instead of mask to read discovery table offset */ + VSEC_QUIRK_TABLE_SHIFT = BIT(2), + + /* DVSEC not present (provided in driver data) */ + VSEC_QUIRK_NO_DVSEC = BIT(3), + + /* Platforms requiring quirk in the auxiliary driver */ + VSEC_QUIRK_EARLY_HW = BIT(4), +}; + +/** + * struct pmt_callbacks - Callback infrastructure for PMT devices + * ->read_telem() when specified, called by client driver to access PMT data (instead + * of direct copy). 
+ * @pdev: PCI device reference for the callback's use + * @guid: ID of data to access + * @data: buffer for the data to be copied + * @count: size of buffer + */ +struct pmt_callbacks { + int (*read_telem)(struct pci_dev *pdev, u32 guid, u64 *data, u32 count); +}; + +/** + * struct intel_vsec_platform_info - Platform specific data + * @parent: parent device in the auxbus chain + * @headers: list of headers to define the PMT client devices to create + * @priv_data: private data, usable by parent devices, currently a callback + * @caps: bitmask of PMT capabilities for the given headers + * @quirks: bitmask of VSEC device quirks + * @base_addr: allow a base address to be specified (rather than derived) + */ +struct intel_vsec_platform_info { + struct device *parent; + struct intel_vsec_header **headers; + void *priv_data; + unsigned long caps; + unsigned long quirks; + u64 base_addr; +}; + +/** + * struct intel_vsec_device - Auxbus specific device information + * @auxdev: auxbus device struct for auxbus access + * @pcidev: pci device associated with the device + * @resource: any resources shared by the parent + * @ida: id reference + * @num_resources: number of resources + * @id: xarray id + * @priv_data: any private data needed + * @quirks: specified quirks + * @base_addr: base address of entries (if specified) + */ +struct intel_vsec_device { + struct auxiliary_device auxdev; + struct pci_dev *pcidev; + struct resource *resource; + struct ida *ida; + int num_resources; + int id; /* xa */ + void *priv_data; + size_t priv_data_size; + unsigned long quirks; + u64 base_addr; +}; + +int intel_vsec_add_aux(struct pci_dev *pdev, struct device *parent, + struct intel_vsec_device *intel_vsec_dev, + const char *name); + +static inline struct intel_vsec_device *dev_to_ivdev(struct device *dev) +{ + return container_of(dev, struct intel_vsec_device, auxdev.dev); +} + +static inline struct intel_vsec_device *auxdev_to_ivdev(struct auxiliary_device *auxdev) +{ + return container_of(auxdev, struct intel_vsec_device, auxdev); +} + +#if IS_ENABLED(CONFIG_INTEL_VSEC) +void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info); +#else +static inline void intel_vsec_register(struct pci_dev *pdev, + struct intel_vsec_platform_info *info) +{ +} +#endif +#endif diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index 3f30c88e0b4c..457151f9f263 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h @@ -276,7 +276,7 @@ struct irq_affinity_notify { #define IRQ_AFFINITY_MAX_SETS 4 /** - * struct irq_affinity - Description for automatic irq affinity assignements + * struct irq_affinity - Description for automatic irq affinity assignments * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of * the MSI(-X) vector space * @post_vectors: Don't apply affinity to @post_vectors at end of @@ -594,7 +594,7 @@ extern const char * const softirq_to_name[NR_SOFTIRQS]; struct softirq_action { - void (*action)(struct softirq_action *); + void (*action)(void); }; asmlinkage void do_softirq(void); @@ -609,7 +609,7 @@ static inline void do_softirq_post_smp_call_flush(unsigned int unused) } #endif -extern void open_softirq(int nr, void (*action)(struct softirq_action *)); +extern void open_softirq(int nr, void (*action)(void)); extern void softirq_init(void); extern void __raise_softirq_irqoff(unsigned int nr); diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h index f9a81761bfce..b1ecfc3cd5bc 100644 --- a/include/linux/io-pgtable.h +++ 
b/include/linux/io-pgtable.h @@ -171,6 +171,10 @@ struct io_pgtable_cfg { u64 ttbr[4]; u32 n_ttbrs; } apple_dart_cfg; + + struct { + int nid; + } amd; }; }; diff --git a/include/linux/io_uring/cmd.h b/include/linux/io_uring/cmd.h index 447fbfd32215..c189d36ad55e 100644 --- a/include/linux/io_uring/cmd.h +++ b/include/linux/io_uring/cmd.h @@ -23,6 +23,15 @@ static inline const void *io_uring_sqe_cmd(const struct io_uring_sqe *sqe) return sqe->cmd; } +static inline void io_uring_cmd_private_sz_check(size_t cmd_sz) +{ + BUILD_BUG_ON(cmd_sz > sizeof_field(struct io_uring_cmd, pdu)); +} +#define io_uring_cmd_to_pdu(cmd, pdu_type) ( \ + io_uring_cmd_private_sz_check(sizeof(pdu_type)), \ + ((pdu_type *)&(cmd)->pdu) \ +) + #if defined(CONFIG_IO_URING) int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, struct iov_iter *iter, void *ioucmd); @@ -48,6 +57,9 @@ void __io_uring_cmd_do_in_task(struct io_uring_cmd *ioucmd, void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, unsigned int issue_flags); +/* Execute the request from a blocking context */ +void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd); + #else static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw, struct iov_iter *iter, void *ioucmd) @@ -67,6 +79,9 @@ static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd, unsigned int issue_flags) { } +static inline void io_uring_cmd_issue_blocking(struct io_uring_cmd *ioucmd) +{ +} #endif /* diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h index 3315005df117..4b9ba523978d 100644 --- a/include/linux/io_uring_types.h +++ b/include/linux/io_uring_types.h @@ -239,6 +239,9 @@ struct io_ring_ctx { struct io_rings *rings; struct percpu_ref refs; + clockid_t clockid; + enum tk_offsets clock_offset; + enum task_work_notify_mode notify_method; unsigned sq_thread_idle; } ____cacheline_aligned_in_smp; diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 6fc1c858013d..4ad12a3c8bae 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -257,11 +257,7 @@ static inline const struct iomap *iomap_iter_srcmap(const struct iomap_iter *i) } ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from, - const struct iomap_ops *ops); -int iomap_file_buffered_write_punch_delalloc(struct inode *inode, - struct iomap *iomap, loff_t pos, loff_t length, ssize_t written, - int (*punch)(struct inode *inode, loff_t pos, loff_t length)); - + const struct iomap_ops *ops, void *private); int iomap_read_folio(struct folio *folio, const struct iomap_ops *ops); void iomap_readahead(struct readahead_control *, const struct iomap_ops *ops); bool iomap_is_partially_uptodate(struct folio *, size_t from, size_t count); @@ -277,6 +273,13 @@ int iomap_truncate_page(struct inode *inode, loff_t pos, bool *did_zero, const struct iomap_ops *ops); vm_fault_t iomap_page_mkwrite(struct vm_fault *vmf, const struct iomap_ops *ops); + +typedef void (*iomap_punch_t)(struct inode *inode, loff_t offset, loff_t length, + struct iomap *iomap); +void iomap_file_buffered_write_punch_delalloc(struct inode *inode, loff_t pos, + loff_t length, ssize_t written, unsigned flag, + struct iomap *iomap, iomap_punch_t punch); + int iomap_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, u64 start, u64 len, const struct iomap_ops *ops); loff_t iomap_seek_hole(struct inode *inode, loff_t offset, diff --git a/include/linux/iommu-dma.h b/include/linux/iommu-dma.h new file mode 100644 index 000000000000..508beaa44c39 --- 
/dev/null +++ b/include/linux/iommu-dma.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved + * + * DMA operations that map physical memory through IOMMU. + */ +#ifndef _LINUX_IOMMU_DMA_H +#define _LINUX_IOMMU_DMA_H + +#include <linux/dma-direction.h> + +#ifdef CONFIG_IOMMU_DMA +static inline bool use_dma_iommu(struct device *dev) +{ + return dev->dma_iommu; +} +#else +static inline bool use_dma_iommu(struct device *dev) +{ + return false; +} +#endif /* CONFIG_IOMMU_DMA */ + +dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, enum dma_data_direction dir, + unsigned long attrs); +void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, unsigned long attrs); +int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs); +void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, + enum dma_data_direction dir, unsigned long attrs); +void *iommu_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, + gfp_t gfp, unsigned long attrs); +int iommu_dma_mmap(struct device *dev, struct vm_area_struct *vma, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +int iommu_dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, size_t size, + unsigned long attrs); +unsigned long iommu_dma_get_merge_boundary(struct device *dev); +size_t iommu_dma_opt_mapping_size(void); +size_t iommu_dma_max_mapping_size(struct device *dev); +void iommu_dma_free(struct device *dev, size_t size, void *cpu_addr, + dma_addr_t handle, unsigned long attrs); +dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys, + size_t size, enum dma_data_direction dir, unsigned long attrs); +void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, unsigned long attrs); +struct sg_table *iommu_dma_alloc_noncontiguous(struct device *dev, size_t size, + enum dma_data_direction dir, gfp_t gfp, unsigned long attrs); +void iommu_dma_free_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt, enum dma_data_direction dir); +void *iommu_dma_vmap_noncontiguous(struct device *dev, size_t size, + struct sg_table *sgt); +#define iommu_dma_vunmap_noncontiguous(dev, vaddr) \ + vunmap(vaddr); +int iommu_dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma, + size_t size, struct sg_table *sgt); +void iommu_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir); +void iommu_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir); +void iommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir); +void iommu_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sgl, + int nelems, enum dma_data_direction dir); + +#endif /* _LINUX_IOMMU_DMA_H */ diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h index ffc3a949f837..30f832a60ccb 100644 --- a/include/linux/iommufd.h +++ b/include/linux/iommufd.h @@ -6,17 +6,17 @@ #ifndef __LINUX_IOMMUFD_H #define __LINUX_IOMMUFD_H -#include <linux/types.h> -#include <linux/errno.h> #include <linux/err.h> +#include <linux/errno.h> +#include <linux/types.h> struct device; -struct 
iommufd_device; -struct page; -struct iommufd_ctx; -struct iommufd_access; struct file; struct iommu_group; +struct iommufd_access; +struct iommufd_ctx; +struct iommufd_device; +struct page; struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx, struct device *dev, u32 *id); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index db1249cd9692..b25377b6ea98 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task) { if (task->policy == SCHED_IDLE) return IOPRIO_CLASS_IDLE; - else if (task_is_realtime(task)) + else if (rt_or_dl_task_policy(task)) return IOPRIO_CLASS_RT; else return IOPRIO_CLASS_BE; diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h index f0e99fc7dd8b..2bd1661fe9ad 100644 --- a/include/linux/ioremap.h +++ b/include/linux/ioremap.h @@ -4,6 +4,7 @@ #include <linux/kasan.h> #include <asm/pgtable.h> +#include <asm/vmalloc.h> #if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP) /* diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h index 270454a6703d..c4aa58032faf 100644 --- a/include/linux/iov_iter.h +++ b/include/linux/iov_iter.h @@ -10,6 +10,7 @@ #include <linux/uio.h> #include <linux/bvec.h> +#include <linux/folio_queue.h> typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len, void *priv, void *priv2); @@ -141,6 +142,60 @@ size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2, } /* + * Handle ITER_FOLIOQ. + */ +static __always_inline +size_t iterate_folioq(struct iov_iter *iter, size_t len, void *priv, void *priv2, + iov_step_f step) +{ + const struct folio_queue *folioq = iter->folioq; + unsigned int slot = iter->folioq_slot; + size_t progress = 0, skip = iter->iov_offset; + + if (slot == folioq_nr_slots(folioq)) { + /* The iterator may have been extended. */ + folioq = folioq->next; + slot = 0; + } + + do { + struct folio *folio = folioq_folio(folioq, slot); + size_t part, remain, consumed; + size_t fsize; + void *base; + + if (!folio) + break; + + fsize = folioq_folio_size(folioq, slot); + base = kmap_local_folio(folio, skip); + part = umin(len, PAGE_SIZE - skip % PAGE_SIZE); + remain = step(base, progress, part, priv, priv2); + kunmap_local(base); + consumed = part - remain; + len -= consumed; + progress += consumed; + skip += consumed; + if (skip >= fsize) { + skip = 0; + slot++; + if (slot == folioq_nr_slots(folioq) && folioq->next) { + folioq = folioq->next; + slot = 0; + } + } + if (remain) + break; + } while (len); + + iter->folioq_slot = slot; + iter->folioq = folioq; + iter->iov_offset = skip; + iter->count -= progress; + return progress; +} + +/* * Handle ITER_XARRAY. */ static __always_inline @@ -249,6 +304,8 @@ size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv, return iterate_bvec(iter, len, priv, priv2, step); if (iov_iter_is_kvec(iter)) return iterate_kvec(iter, len, priv, priv2, step); + if (iov_iter_is_folioq(iter)) + return iterate_folioq(iter, len, priv, priv2, step); if (iov_iter_is_xarray(iter)) return iterate_xarray(iter, len, priv, priv2, step); return iterate_discard(iter, len, priv, priv2, step); @@ -271,4 +328,51 @@ size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv, return iterate_and_advance2(iter, len, priv, NULL, ustep, step); } +/** + * iterate_and_advance_kernel - Iterate over a kernel-internal iterator + * @iter: The iterator to iterate over. + * @len: The amount to iterate over. 
+ * @priv: Data for the step function. + * @priv2: More data for the step function. + * @step: Step function; given kernel addresses. + * + * Iterate over the next part of an iterator, up to the specified length. The + * buffer is presented in segments, which for kernel iteration are broken up by + * physical pages and mapped, with the mapped address being presented. + * + * [!] Note This will only handle BVEC, KVEC, FOLIOQ, XARRAY and DISCARD-type + * iterators; it will not handle UBUF or IOVEC-type iterators. + * + * A step function, @step, must be provided; it is given mapped kernel + * addresses, since the user-address iterator types are not handled here. + * + * The step function is passed the address and length of the segment, @priv, + * @priv2 and the amount of data so far iterated over (which can, for example, + * be added to @priv to point to the right part of a second buffer). The step + * function should return the amount of the segment it didn't process (ie. 0 + * indicates complete processing). + * + * This function returns the amount of data processed (ie. 0 means nothing was + * processed and the value of @len means processed to completion). + */ +static __always_inline +size_t iterate_and_advance_kernel(struct iov_iter *iter, size_t len, void *priv, + void *priv2, iov_step_f step) +{ + if (unlikely(iter->count < len)) + len = iter->count; + if (unlikely(!len)) + return 0; + if (iov_iter_is_bvec(iter)) + return iterate_bvec(iter, len, priv, priv2, step); + if (iov_iter_is_kvec(iter)) + return iterate_kvec(iter, len, priv, priv2, step); + if (iov_iter_is_folioq(iter)) + return iterate_folioq(iter, len, priv, priv2, step); + if (iov_iter_is_xarray(iter)) + return iterate_xarray(iter, len, priv, priv2, step); + return iterate_discard(iter, len, priv, priv2, step); +} + #endif /* _LINUX_IOV_ITER_H */ diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 383a0ea2ab91..a6e2aadbb91b 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -89,6 +89,7 @@ struct ipv6_devconf { __u8 ioam6_enabled; __u8 ndisc_evict_nocarrier; __u8 ra_honor_pio_life; + __u8 ra_honor_pio_pflag; struct ctl_table_header *sysctl_header; }; diff --git a/include/linux/irq.h b/include/linux/irq.h index 1f5dbf1f92c9..fa711f80957b 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h @@ -991,7 +991,6 @@ void irq_init_desc(unsigned int irq); * @ack: Ack register offset to reg_base * @eoi: Eoi register offset to reg_base * @type: Type configuration register offset to reg_base - * @polarity: Polarity configuration register offset to reg_base */ struct irq_chip_regs { unsigned long enable; @@ -1000,7 +999,6 @@ struct irq_chip_regs { unsigned long ack; unsigned long eoi; unsigned long type; - unsigned long polarity; }; /** @@ -1040,8 +1038,6 @@ struct irq_chip_type { * @irq_base: Interrupt base nr for this chip * @irq_cnt: Number of interrupts handled by this chip * @mask_cache: Cached mask register shared between all chip types - * @type_cache: Cached type register - * @polarity_cache: Cached polarity register * @wake_enabled: Interrupt can wakeup from suspend * @wake_active: Interrupt is marked as an wakeup from suspend source * @num_ct: Number of available irq_chip_type instances (usually 1) @@ -1068,8 +1064,6 @@ struct irq_chip_generic { unsigned int irq_base; unsigned int irq_cnt; u32 mask_cache; - u32 type_cache; - u32 polarity_cache; u32 wake_enabled; u32 wake_active; unsigned int num_ct; diff --git 
a/include/linux/irqchip/riscv-imsic.h b/include/linux/irqchip/riscv-imsic.h index faf0b800b1b0..7494952c5518 100644 --- a/include/linux/irqchip/riscv-imsic.h +++ b/include/linux/irqchip/riscv-imsic.h @@ -8,6 +8,8 @@ #include <linux/types.h> #include <linux/bitops.h> +#include <linux/device.h> +#include <linux/fwnode.h> #include <asm/csr.h> #define IMSIC_MMIO_PAGE_SHIFT 12 @@ -84,4 +86,11 @@ static inline const struct imsic_global_config *imsic_get_global_config(void) #endif +#ifdef CONFIG_ACPI +int imsic_platform_acpi_probe(struct fwnode_handle *fwnode); +struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev); +#else +static inline struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev) { return NULL; } +#endif + #endif diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index de6105f68fec..e432b6a12a32 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h @@ -291,7 +291,12 @@ struct irq_domain_chip_generic_info; * @hwirq_max: Maximum number of interrupts supported by controller * @direct_max: Maximum value of direct maps; * Use ~0 for no limit; 0 for no direct mapping + * @hwirq_base: The first hardware interrupt number (legacy domains only) + * @virq_base: The first Linux interrupt number for legacy domains to + * immediately associate the interrupts after domain creation * @bus_token: Domain bus token + * @name_suffix: Optional name suffix to avoid collisions when multiple + * domains are added using same fwnode * @ops: Domain operation callbacks * @host_data: Controller private data pointer * @dgc_info: Geneneric chip information structure pointer used to @@ -307,7 +312,10 @@ struct irq_domain_info { unsigned int size; irq_hw_number_t hwirq_max; int direct_max; + unsigned int hwirq_base; + unsigned int virq_base; enum irq_domain_bus_token bus_token; + const char *name_suffix; const struct irq_domain_ops *ops; void *host_data; #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 5157d92b6f23..8aef9bb6ad57 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h @@ -1086,7 +1086,7 @@ struct journal_s int j_revoke_records_per_block; /** - * @j_transaction_overhead: + * @j_transaction_overhead_buffers: * * Number of blocks each transaction needs for its own bookkeeping */ @@ -1675,7 +1675,7 @@ int jbd2_fc_get_buf(journal_t *journal, struct buffer_head **bh_out); int jbd2_submit_inode_data(journal_t *journal, struct jbd2_inode *jinode); int jbd2_wait_inode_data(journal_t *journal, struct jbd2_inode *jinode); int jbd2_fc_wait_bufs(journal_t *journal, int num_blks); -int jbd2_fc_release_bufs(journal_t *journal); +void jbd2_fc_release_bufs(journal_t *journal); /* * is_journal_abort diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h index d9f1435a5a13..1220f0fbe5bf 100644 --- a/include/linux/jiffies.h +++ b/include/linux/jiffies.h @@ -418,7 +418,7 @@ extern unsigned long preset_lpj; #define NSEC_CONVERSION ((unsigned long)((((u64)1 << NSEC_JIFFIE_SC) +\ TICK_NSEC -1) / (u64)TICK_NSEC)) /* - * The maximum jiffie value is (MAX_INT >> 1). Here we translate that + * The maximum jiffy value is (MAX_INT >> 1). Here we translate that * into seconds. The 64-bit case will overflow if we are not careful, * so use the messy SH_DIV macro to do it. Still all constants. 
*/ diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 70d6a8f6e25d..00a3bf7c0d8f 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -175,13 +175,59 @@ static __always_inline void * __must_check kasan_init_slab_obj( return (void *)object; } -bool __kasan_slab_free(struct kmem_cache *s, void *object, - unsigned long ip, bool init); +bool __kasan_slab_pre_free(struct kmem_cache *s, void *object, + unsigned long ip); +/** + * kasan_slab_pre_free - Check whether freeing a slab object is safe. + * @object: Object to be freed. + * + * This function checks whether freeing the given object is safe. It may + * check for double-free and invalid-free bugs and report them. + * + * This function is intended only for use by the slab allocator. + * + * @Return true if freeing the object is unsafe; false otherwise. + */ +static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s, + void *object) +{ + if (kasan_enabled()) + return __kasan_slab_pre_free(s, object, _RET_IP_); + return false; +} + +bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init, + bool still_accessible); +/** + * kasan_slab_free - Poison, initialize, and quarantine a slab object. + * @object: Object to be freed. + * @init: Whether to initialize the object. + * @still_accessible: Whether the object contents are still accessible. + * + * This function informs that a slab object has been freed and is not + * supposed to be accessed anymore, except when @still_accessible is set + * (indicating that the object is in a SLAB_TYPESAFE_BY_RCU cache and an RCU + * grace period might not have passed yet). + * + * For KASAN modes that have integrated memory initialization + * (kasan_has_integrated_init() == true), this function also initializes + * the object's memory. For other modes, the @init argument is ignored. + * + * This function might also take ownership of the object to quarantine it. + * When this happens, KASAN will defer freeing the object to a later + * stage and handle it internally until then. The return value indicates + * whether KASAN took ownership of the object. + * + * This function is intended only for use by the slab allocator. + * + * @Return true if KASAN took ownership of the object; false otherwise. 
+ */ static __always_inline bool kasan_slab_free(struct kmem_cache *s, - void *object, bool init) + void *object, bool init, + bool still_accessible) { if (kasan_enabled()) - return __kasan_slab_free(s, object, _RET_IP_, init); + return __kasan_slab_free(s, object, init, still_accessible); return false; } @@ -371,7 +417,14 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache, { return (void *)object; } -static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init) + +static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object) +{ + return false; +} + +static inline bool kasan_slab_free(struct kmem_cache *s, void *object, + bool init, bool still_accessible) { return false; } diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h index 859f4b0c1b2b..196778a087c4 100644 --- a/include/linux/kernel-page-flags.h +++ b/include/linux/kernel-page-flags.h @@ -10,12 +10,11 @@ */ #define KPF_RESERVED 32 #define KPF_MLOCKED 33 -#define KPF_MAPPEDTODISK 34 +#define KPF_OWNER_2 34 #define KPF_PRIVATE 35 #define KPF_PRIVATE_2 36 #define KPF_OWNER_PRIVATE 37 #define KPF_ARCH 38 -#define KPF_UNCACHED 39 #define KPF_SOFTDIRTY 40 #define KPF_ARCH_2 41 #define KPF_ARCH_3 42 diff --git a/include/linux/key.h b/include/linux/key.h index 943a432da3ae..074dca3222b9 100644 --- a/include/linux/key.h +++ b/include/linux/key.h @@ -436,9 +436,6 @@ extern key_ref_t keyring_search(key_ref_t keyring, const char *description, bool recurse); -extern int keyring_add_key(struct key *keyring, - struct key *key); - extern int keyring_restrict(key_ref_t keyring, const char *type, const char *restriction); diff --git a/include/linux/kfence.h b/include/linux/kfence.h index 88100cc9caba..0ad1ddbb8b99 100644 --- a/include/linux/kfence.h +++ b/include/linux/kfence.h @@ -124,7 +124,7 @@ static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp if (!static_branch_likely(&kfence_allocation_key)) return NULL; #endif - if (likely(atomic_read(&kfence_allocation_gate))) + if (likely(atomic_read(&kfence_allocation_gate) > 0)) return NULL; return __kfence_alloc(s, size, flags); } diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index f68865e19b0b..30baae91b225 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -4,6 +4,7 @@ #include <linux/sched/coredump.h> /* MMF_VM_HUGEPAGE */ +extern unsigned int khugepaged_max_ptes_none __read_mostly; #ifdef CONFIG_TRANSPARENT_HUGEPAGE extern struct attribute_group khugepaged_attr_group; diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h index 906521c2329c..6055fc969877 100644 --- a/include/linux/kmsg_dump.h +++ b/include/linux/kmsg_dump.h @@ -40,6 +40,17 @@ struct kmsg_dump_iter { }; /** + * struct kmsg_dump_detail - kernel crash detail + * @reason: reason for the crash, see kmsg_dump_reason. + * @description: optional short string, to provide additional information. 
+ */ + +struct kmsg_dump_detail { + enum kmsg_dump_reason reason; + const char *description; +}; + +/** * struct kmsg_dumper - kernel crash message dumper structure * @list: Entry in the dumper list (private) * @dump: Call into dumping code which will retrieve the data with @@ -49,13 +60,13 @@ struct kmsg_dump_iter { */ struct kmsg_dumper { struct list_head list; - void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason); + void (*dump)(struct kmsg_dumper *dumper, struct kmsg_dump_detail *detail); enum kmsg_dump_reason max_reason; bool registered; }; #ifdef CONFIG_PRINTK -void kmsg_dump(enum kmsg_dump_reason reason); +void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc); bool kmsg_dump_get_line(struct kmsg_dump_iter *iter, bool syslog, char *line, size_t size, size_t *len); @@ -71,7 +82,7 @@ int kmsg_dump_unregister(struct kmsg_dumper *dumper); const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason); #else -static inline void kmsg_dump(enum kmsg_dump_reason reason) +static inline void kmsg_dump_desc(enum kmsg_dump_reason reason, const char *desc) { } @@ -107,4 +118,9 @@ static inline const char *kmsg_dump_reason_str(enum kmsg_dump_reason reason) } #endif +static inline void kmsg_dump(enum kmsg_dump_reason reason) +{ + kmsg_dump_desc(reason, NULL); +} + #endif /* _LINUX_KMSG_DUMP_H */ diff --git a/include/linux/leds.h b/include/linux/leds.h index 6885603f211b..e5968c3ed4ae 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -611,6 +611,8 @@ enum led_trigger_netdev_modes { TRIGGER_NETDEV_FULL_DUPLEX, TRIGGER_NETDEV_TX, TRIGGER_NETDEV_RX, + TRIGGER_NETDEV_TX_ERR, + TRIGGER_NETDEV_RX_ERR, /* Keep last */ __TRIGGER_NETDEV_MAX, diff --git a/include/linux/libata.h b/include/linux/libata.h index 17394098bee9..9b4a6ff03235 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -55,6 +55,46 @@ /* defines only for the constants which don't work well as enums */ #define ATA_TAG_POISON 0xfafbfcfdU +/* + * Quirk flags bits. + * ata_device->quirks is an unsigned int, so __ATA_QUIRK_MAX must not exceed 32. 
+ */ +enum ata_quirks { + __ATA_QUIRK_DIAGNOSTIC, /* Failed boot diag */ + __ATA_QUIRK_NODMA, /* DMA problems */ + __ATA_QUIRK_NONCQ, /* Don't use NCQ */ + __ATA_QUIRK_MAX_SEC_128, /* Limit max sects to 128 */ + __ATA_QUIRK_BROKEN_HPA, /* Broken HPA */ + __ATA_QUIRK_DISABLE, /* Disable it */ + __ATA_QUIRK_HPA_SIZE, /* Native size off by one */ + __ATA_QUIRK_IVB, /* cbl det validity bit bugs */ + __ATA_QUIRK_STUCK_ERR, /* Stuck ERR on next PACKET */ + __ATA_QUIRK_BRIDGE_OK, /* No bridge limits */ + __ATA_QUIRK_ATAPI_MOD16_DMA, /* Use ATAPI DMA for commands that */ + /* are not a multiple of 16 bytes */ + __ATA_QUIRK_FIRMWARE_WARN, /* Firmware update warning */ + __ATA_QUIRK_1_5_GBPS, /* Force 1.5 Gbps */ + __ATA_QUIRK_NOSETXFER, /* Skip SETXFER, SATA only */ + __ATA_QUIRK_BROKEN_FPDMA_AA, /* Skip AA */ + __ATA_QUIRK_DUMP_ID, /* Dump IDENTIFY data */ + __ATA_QUIRK_MAX_SEC_LBA48, /* Set max sects to 65535 */ + __ATA_QUIRK_ATAPI_DMADIR, /* Device requires dmadir */ + __ATA_QUIRK_NO_NCQ_TRIM, /* Do not use queued TRIM */ + __ATA_QUIRK_NOLPM, /* Do not use LPM */ + __ATA_QUIRK_WD_BROKEN_LPM, /* Some WDs have broken LPM */ + __ATA_QUIRK_ZERO_AFTER_TRIM, /* Guarantees zero after trim */ + __ATA_QUIRK_NO_DMA_LOG, /* Do not use DMA for log read */ + __ATA_QUIRK_NOTRIM, /* Do not use TRIM */ + __ATA_QUIRK_MAX_SEC_1024, /* Limit max sects to 1024 */ + __ATA_QUIRK_MAX_TRIM_128M, /* Limit max trim size to 128M */ + __ATA_QUIRK_NO_NCQ_ON_ATI, /* Disable NCQ on ATI chipset */ + __ATA_QUIRK_NO_ID_DEV_LOG, /* Identify device log missing */ + __ATA_QUIRK_NO_LOG_DIR, /* Do not read log directory */ + __ATA_QUIRK_NO_FUA, /* Do not use FUA */ + + __ATA_QUIRK_MAX, +}; + enum { /* various global constants */ LIBATA_MAX_PRD = ATA_MAX_PRD / 2, @@ -338,6 +378,7 @@ enum { ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ + ATA_EHI_DID_PRINT_QUIRKS = (1 << 21), /* already printed quirks info */ ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, @@ -362,43 +403,42 @@ enum { */ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8, - /* Horkage types. 
May be set by libata or controller on drives - (some horkage may be drive/controller pair dependent */ - - ATA_HORKAGE_DIAGNOSTIC = (1 << 0), /* Failed boot diag */ - ATA_HORKAGE_NODMA = (1 << 1), /* DMA problems */ - ATA_HORKAGE_NONCQ = (1 << 2), /* Don't use NCQ */ - ATA_HORKAGE_MAX_SEC_128 = (1 << 3), /* Limit max sects to 128 */ - ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ - ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ - ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ - ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ - ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ - ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ - ATA_HORKAGE_ATAPI_MOD16_DMA = (1 << 11), /* use ATAPI DMA for commands - not multiple of 16 bytes */ - ATA_HORKAGE_FIRMWARE_WARN = (1 << 12), /* firmware update warning */ - ATA_HORKAGE_1_5_GBPS = (1 << 13), /* force 1.5 Gbps */ - ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ - ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ - ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ - ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ - ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */ - ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ - ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ - ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ - ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ - ATA_HORKAGE_NO_DMA_LOG = (1 << 23), /* don't use DMA for log read */ - ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */ - ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */ - ATA_HORKAGE_MAX_TRIM_128M = (1 << 26), /* Limit max trim size to 128M */ - ATA_HORKAGE_NO_NCQ_ON_ATI = (1 << 27), /* Disable NCQ on ATI chipset */ - ATA_HORKAGE_NO_ID_DEV_LOG = (1 << 28), /* Identify device log missing */ - ATA_HORKAGE_NO_LOG_DIR = (1 << 29), /* Do not read log directory */ - ATA_HORKAGE_NO_FUA = (1 << 30), /* Do not use FUA */ - - /* DMA mask for user DMA control: User visible values; DO NOT - renumber */ + /* + * Quirk flags: may be set by libata or controller drivers on drives. + * Some quirks may be drive/controller pair dependent. 
+ */ + ATA_QUIRK_DIAGNOSTIC = (1U << __ATA_QUIRK_DIAGNOSTIC), + ATA_QUIRK_NODMA = (1U << __ATA_QUIRK_NODMA), + ATA_QUIRK_NONCQ = (1U << __ATA_QUIRK_NONCQ), + ATA_QUIRK_MAX_SEC_128 = (1U << __ATA_QUIRK_MAX_SEC_128), + ATA_QUIRK_BROKEN_HPA = (1U << __ATA_QUIRK_BROKEN_HPA), + ATA_QUIRK_DISABLE = (1U << __ATA_QUIRK_DISABLE), + ATA_QUIRK_HPA_SIZE = (1U << __ATA_QUIRK_HPA_SIZE), + ATA_QUIRK_IVB = (1U << __ATA_QUIRK_IVB), + ATA_QUIRK_STUCK_ERR = (1U << __ATA_QUIRK_STUCK_ERR), + ATA_QUIRK_BRIDGE_OK = (1U << __ATA_QUIRK_BRIDGE_OK), + ATA_QUIRK_ATAPI_MOD16_DMA = (1U << __ATA_QUIRK_ATAPI_MOD16_DMA), + ATA_QUIRK_FIRMWARE_WARN = (1U << __ATA_QUIRK_FIRMWARE_WARN), + ATA_QUIRK_1_5_GBPS = (1U << __ATA_QUIRK_1_5_GBPS), + ATA_QUIRK_NOSETXFER = (1U << __ATA_QUIRK_NOSETXFER), + ATA_QUIRK_BROKEN_FPDMA_AA = (1U << __ATA_QUIRK_BROKEN_FPDMA_AA), + ATA_QUIRK_DUMP_ID = (1U << __ATA_QUIRK_DUMP_ID), + ATA_QUIRK_MAX_SEC_LBA48 = (1U << __ATA_QUIRK_MAX_SEC_LBA48), + ATA_QUIRK_ATAPI_DMADIR = (1U << __ATA_QUIRK_ATAPI_DMADIR), + ATA_QUIRK_NO_NCQ_TRIM = (1U << __ATA_QUIRK_NO_NCQ_TRIM), + ATA_QUIRK_NOLPM = (1U << __ATA_QUIRK_NOLPM), + ATA_QUIRK_WD_BROKEN_LPM = (1U << __ATA_QUIRK_WD_BROKEN_LPM), + ATA_QUIRK_ZERO_AFTER_TRIM = (1U << __ATA_QUIRK_ZERO_AFTER_TRIM), + ATA_QUIRK_NO_DMA_LOG = (1U << __ATA_QUIRK_NO_DMA_LOG), + ATA_QUIRK_NOTRIM = (1U << __ATA_QUIRK_NOTRIM), + ATA_QUIRK_MAX_SEC_1024 = (1U << __ATA_QUIRK_MAX_SEC_1024), + ATA_QUIRK_MAX_TRIM_128M = (1U << __ATA_QUIRK_MAX_TRIM_128M), + ATA_QUIRK_NO_NCQ_ON_ATI = (1U << __ATA_QUIRK_NO_NCQ_ON_ATI), + ATA_QUIRK_NO_ID_DEV_LOG = (1U << __ATA_QUIRK_NO_ID_DEV_LOG), + ATA_QUIRK_NO_LOG_DIR = (1U << __ATA_QUIRK_NO_LOG_DIR), + ATA_QUIRK_NO_FUA = (1U << __ATA_QUIRK_NO_FUA), + + /* User visible DMA mask for DMA control. DO NOT renumber. */ ATA_DMA_MASK_ATA = (1 << 0), /* DMA on ATA Disk */ ATA_DMA_MASK_ATAPI = (1 << 1), /* DMA on ATAPI */ ATA_DMA_MASK_CFA = (1 << 2), /* DMA on CF Card */ @@ -660,10 +700,25 @@ struct ata_cpr_log { struct ata_cpr cpr[] __counted_by(nr_cpr); }; +struct ata_cdl { + /* + * Buffer to cache the CDL log page 18h (command duration descriptors) + * for SCSI-ATA translation. + */ + u8 desc_log_buf[ATA_LOG_CDL_SIZE]; + + /* + * Buffer to handle reading the sense data for successful NCQ Commands + * log page for commands using a CDL with one of the limits policy set + * to 0xD (successful completion with sense data available bit set). 
+ */ + u8 ncq_sense_log_buf[ATA_LOG_SENSE_NCQ_SIZE]; +}; + struct ata_device { struct ata_link *link; unsigned int devno; /* 0 or 1 */ - unsigned int horkage; /* List of broken features */ + unsigned int quirks; /* List of broken features */ unsigned long flags; /* ATA_DFLAG_xxx */ struct scsi_device *sdev; /* attached SCSI device */ void *private_data; @@ -722,13 +777,16 @@ struct ata_device { /* Concurrent positioning ranges */ struct ata_cpr_log *cpr_log; - /* Command Duration Limits log support */ - u8 cdl[ATA_LOG_CDL_SIZE]; + /* Command Duration Limits support */ + struct ata_cdl *cdl; /* error history */ int spdn_cnt; /* ering is CLEAR_END, read comment above CLEAR_END */ struct ata_ering ering; + + /* For EH */ + u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; }; /* Fields between ATA_DEVICE_CLEAR_BEGIN and ATA_DEVICE_CLEAR_END are @@ -874,9 +932,6 @@ struct ata_port { #ifdef CONFIG_ATA_ACPI struct ata_acpi_gtm __acpi_init_gtm; /* use ata_acpi_init_gtm() */ #endif - /* owned by EH */ - u8 *ncq_sense_buf; - u8 sector_buf[ATA_SECT_SIZE] ____cacheline_aligned; }; /* The following initializer overrides a method to NULL whether one of @@ -1064,8 +1119,6 @@ static inline bool ata_port_is_frozen(const struct ata_port *ap) extern int ata_std_prereset(struct ata_link *link, unsigned long deadline); extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline, int (*check_ready)(struct ata_link *link)); -extern int sata_std_hardreset(struct ata_link *link, unsigned int *class, - unsigned long deadline); extern void ata_std_postreset(struct ata_link *link, unsigned int *classes); extern struct ata_host *ata_host_alloc(struct device *dev, int n_ports); @@ -1129,7 +1182,6 @@ extern int ata_xfer_mode2shift(u8 xfer_mode); extern const char *ata_mode_string(unsigned int xfer_mask); extern unsigned int ata_id_xfermask(const u16 *id); extern int ata_std_qc_defer(struct ata_queued_cmd *qc); -extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc); extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg, unsigned int n_elem); extern unsigned int ata_dev_classify(const struct ata_taskfile *tf); @@ -1190,12 +1242,13 @@ extern int sata_scr_read(struct ata_link *link, int reg, u32 *val); extern int sata_scr_write(struct ata_link *link, int reg, u32 val); extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val); extern int sata_set_spd(struct ata_link *link); +int sata_std_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline); extern int sata_link_hardreset(struct ata_link *link, const unsigned int *timing, unsigned long deadline, bool *online, int (*check_ready)(struct ata_link *)); extern int sata_link_resume(struct ata_link *link, const unsigned int *params, unsigned long deadline); -extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link); extern void ata_eh_analyze_ncq_error(struct ata_link *link); #else static inline const unsigned int * @@ -1217,6 +1270,11 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val) return -EOPNOTSUPP; } static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; } +static inline int sata_std_hardreset(struct ata_link *link, unsigned int *class, + unsigned long deadline) +{ + return -EOPNOTSUPP; +} static inline int sata_link_hardreset(struct ata_link *link, const unsigned int *timing, unsigned long deadline, @@ -1233,10 +1291,6 @@ static inline int sata_link_resume(struct ata_link *link, { return -EOPNOTSUPP; } 
-static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link) -{ - return -EOPNOTSUPP; -} static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { } #endif extern int sata_link_debounce(struct ata_link *link, @@ -1967,7 +2021,6 @@ extern unsigned int ata_sff_data_xfer(struct ata_queued_cmd *qc, extern unsigned int ata_sff_data_xfer32(struct ata_queued_cmd *qc, unsigned char *buf, unsigned int buflen, int rw); extern void ata_sff_irq_on(struct ata_port *ap); -extern void ata_sff_irq_clear(struct ata_port *ap); extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, u8 status, int in_wq); extern void ata_sff_queue_work(struct work_struct *work); diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h index d94bfd9ac8cc..3b9de09871f6 100644 --- a/include/linux/linkmode.h +++ b/include/linux/linkmode.h @@ -37,8 +37,9 @@ static inline bool linkmode_empty(const unsigned long *src) return bitmap_empty(src, __ETHTOOL_LINK_MODE_MASK_NBITS); } -static inline int linkmode_andnot(unsigned long *dst, const unsigned long *src1, - const unsigned long *src2) +static inline bool linkmode_andnot(unsigned long *dst, + const unsigned long *src1, + const unsigned long *src2) { return bitmap_andnot(dst, src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS); } diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index 1b95fe31051f..61c4b9c41904 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h @@ -200,7 +200,7 @@ extern const struct svc_procedure nlmsvc_procedures[24]; extern const struct svc_procedure nlmsvc_procedures4[24]; #endif extern int nlmsvc_grace_period; -extern unsigned long nlmsvc_timeout; +extern unsigned long nlm_timeout; extern bool nsm_use_hostnames; extern u32 nsm_local_state; diff --git a/include/linux/lru_cache.h b/include/linux/lru_cache.h index c9afcdd9324c..ff82ef85a084 100644 --- a/include/linux/lru_cache.h +++ b/include/linux/lru_cache.h @@ -119,7 +119,7 @@ write intent log information, three of which are mentioned here. */ /* this defines an element in a tracked set - * .colision is for hash table lookup. + * .collision is for hash table lookup. * When we process a new IO request, we know its sector, thus can deduce the * region number (label) easily. To do the label -> object lookup without a * full list walk, we use a simple hash table. @@ -145,7 +145,7 @@ write intent log information, three of which are mentioned here. * But it avoids high order page allocations in kmalloc. */ struct lc_element { - struct hlist_node colision; + struct hlist_node collision; struct list_head list; /* LRU list or free list */ unsigned refcnt; /* back "pointer" into lc_cache->element[index], diff --git a/include/linux/lsm_count.h b/include/linux/lsm_count.h new file mode 100644 index 000000000000..16eb49761b25 --- /dev/null +++ b/include/linux/lsm_count.h @@ -0,0 +1,135 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2023 Google LLC. + */ + +#ifndef __LINUX_LSM_COUNT_H +#define __LINUX_LSM_COUNT_H + +#include <linux/args.h> + +#ifdef CONFIG_SECURITY + +/* + * Macros to count the number of LSMs enabled in the kernel at compile time. + */ + +/* + * Capabilities is enabled when CONFIG_SECURITY is enabled. 
+ */ +#if IS_ENABLED(CONFIG_SECURITY) +#define CAPABILITIES_ENABLED 1, +#else +#define CAPABILITIES_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_SELINUX) +#define SELINUX_ENABLED 1, +#else +#define SELINUX_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_SMACK) +#define SMACK_ENABLED 1, +#else +#define SMACK_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_APPARMOR) +#define APPARMOR_ENABLED 1, +#else +#define APPARMOR_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_TOMOYO) +#define TOMOYO_ENABLED 1, +#else +#define TOMOYO_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_YAMA) +#define YAMA_ENABLED 1, +#else +#define YAMA_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_LOADPIN) +#define LOADPIN_ENABLED 1, +#else +#define LOADPIN_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_LOCKDOWN_LSM) +#define LOCKDOWN_ENABLED 1, +#else +#define LOCKDOWN_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_SAFESETID) +#define SAFESETID_ENABLED 1, +#else +#define SAFESETID_ENABLED +#endif + +#if IS_ENABLED(CONFIG_BPF_LSM) +#define BPF_LSM_ENABLED 1, +#else +#define BPF_LSM_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_LANDLOCK) +#define LANDLOCK_ENABLED 1, +#else +#define LANDLOCK_ENABLED +#endif + +#if IS_ENABLED(CONFIG_IMA) +#define IMA_ENABLED 1, +#else +#define IMA_ENABLED +#endif + +#if IS_ENABLED(CONFIG_EVM) +#define EVM_ENABLED 1, +#else +#define EVM_ENABLED +#endif + +#if IS_ENABLED(CONFIG_SECURITY_IPE) +#define IPE_ENABLED 1, +#else +#define IPE_ENABLED +#endif + +/* + * There is a trailing comma that we need to be accounted for. This is done by + * using a skipped argument in __COUNT_LSMS + */ +#define __COUNT_LSMS(skipped_arg, args...) COUNT_ARGS(args...) +#define COUNT_LSMS(args...) __COUNT_LSMS(args) + +#define MAX_LSM_COUNT \ + COUNT_LSMS( \ + CAPABILITIES_ENABLED \ + SELINUX_ENABLED \ + SMACK_ENABLED \ + APPARMOR_ENABLED \ + TOMOYO_ENABLED \ + YAMA_ENABLED \ + LOADPIN_ENABLED \ + LOCKDOWN_ENABLED \ + SAFESETID_ENABLED \ + BPF_LSM_ENABLED \ + LANDLOCK_ENABLED \ + IMA_ENABLED \ + EVM_ENABLED \ + IPE_ENABLED) + +#else + +#define MAX_LSM_COUNT 0 + +#endif /* CONFIG_SECURITY */ + +#endif /* __LINUX_LSM_COUNT_H */ diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h index 855db460e08b..9eca013aa5e1 100644 --- a/include/linux/lsm_hook_defs.h +++ b/include/linux/lsm_hook_defs.h @@ -48,7 +48,7 @@ LSM_HOOK(int, 0, quota_on, struct dentry *dentry) LSM_HOOK(int, 0, syslog, int type) LSM_HOOK(int, 0, settime, const struct timespec64 *ts, const struct timezone *tz) -LSM_HOOK(int, 1, vm_enough_memory, struct mm_struct *mm, long pages) +LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages) LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm) LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file) LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm) @@ -114,6 +114,7 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask, unsigned int obj_type) LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode) LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode) +LSM_HOOK(void, LSM_RET_VOID, inode_free_security_rcu, void *inode_security) LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode, struct inode *dir, const struct qstr *qstr, struct xattr *xattrs, int *xattr_count) @@ -179,6 +180,8 @@ LSM_HOOK(void, LSM_RET_VOID, inode_getsecid, struct inode *inode, u32 *secid) LSM_HOOK(int, 0, inode_copy_up, struct dentry *src, struct cred **new) LSM_HOOK(int, 
-EOPNOTSUPP, inode_copy_up_xattr, struct dentry *src, const char *name) +LSM_HOOK(int, 0, inode_setintegrity, const struct inode *inode, + enum lsm_integrity_type type, const void *value, size_t size) LSM_HOOK(int, 0, kernfs_init_security, struct kernfs_node *kn_dir, struct kernfs_node *kn) LSM_HOOK(int, 0, file_permission, struct file *file, int mask) @@ -353,8 +356,7 @@ LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_inc, void) LSM_HOOK(void, LSM_RET_VOID, secmark_refcount_dec, void) LSM_HOOK(void, LSM_RET_VOID, req_classify_flow, const struct request_sock *req, struct flowi_common *flic) -LSM_HOOK(int, 0, tun_dev_alloc_security, void **security) -LSM_HOOK(void, LSM_RET_VOID, tun_dev_free_security, void *security) +LSM_HOOK(int, 0, tun_dev_alloc_security, void *security) LSM_HOOK(int, 0, tun_dev_create, void) LSM_HOOK(int, 0, tun_dev_attach_queue, void *security) LSM_HOOK(int, 0, tun_dev_attach, struct sock *sk, void *security) @@ -374,8 +376,7 @@ LSM_HOOK(int, 0, mptcp_add_subflow, struct sock *sk, struct sock *ssk) LSM_HOOK(int, 0, ib_pkey_access, void *sec, u64 subnet_prefix, u16 pkey) LSM_HOOK(int, 0, ib_endport_manage_subnet, void *sec, const char *dev_name, u8 port_num) -LSM_HOOK(int, 0, ib_alloc_security, void **sec) -LSM_HOOK(void, LSM_RET_VOID, ib_free_security, void *sec) +LSM_HOOK(int, 0, ib_alloc_security, void *sec) #endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM @@ -403,7 +404,6 @@ LSM_HOOK(int, 0, xfrm_decode_session, struct sk_buff *skb, u32 *secid, #ifdef CONFIG_KEYS LSM_HOOK(int, 0, key_alloc, struct key *key, const struct cred *cred, unsigned long flags) -LSM_HOOK(void, LSM_RET_VOID, key_free, struct key *key) LSM_HOOK(int, 0, key_permission, key_ref_t key_ref, const struct cred *cred, enum key_need_perm need_perm) LSM_HOOK(int, 0, key_getsecurity, struct key *key, char **buffer) @@ -431,7 +431,7 @@ LSM_HOOK(int, 0, bpf_prog_load, struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token) LSM_HOOK(void, LSM_RET_VOID, bpf_prog_free, struct bpf_prog *prog) LSM_HOOK(int, 0, bpf_token_create, struct bpf_token *token, union bpf_attr *attr, - struct path *path) + const struct path *path) LSM_HOOK(void, LSM_RET_VOID, bpf_token_free, struct bpf_token *token) LSM_HOOK(int, 0, bpf_token_cmd, const struct bpf_token *token, enum bpf_cmd cmd) LSM_HOOK(int, 0, bpf_token_capable, const struct bpf_token *token, int cap) @@ -442,7 +442,6 @@ LSM_HOOK(int, 0, locked_down, enum lockdown_reason what) #ifdef CONFIG_PERF_EVENTS LSM_HOOK(int, 0, perf_event_open, struct perf_event_attr *attr, int type) LSM_HOOK(int, 0, perf_event_alloc, struct perf_event *event) -LSM_HOOK(void, LSM_RET_VOID, perf_event_free, struct perf_event *event) LSM_HOOK(int, 0, perf_event_read, struct perf_event *event) LSM_HOOK(int, 0, perf_event_write, struct perf_event *event) #endif /* CONFIG_PERF_EVENTS */ @@ -452,3 +451,10 @@ LSM_HOOK(int, 0, uring_override_creds, const struct cred *new) LSM_HOOK(int, 0, uring_sqpoll, void) LSM_HOOK(int, 0, uring_cmd, struct io_uring_cmd *ioucmd) #endif /* CONFIG_IO_URING */ + +LSM_HOOK(void, LSM_RET_VOID, initramfs_populated, void) + +LSM_HOOK(int, 0, bdev_alloc_security, struct block_device *bdev) +LSM_HOOK(void, LSM_RET_VOID, bdev_free_security, struct block_device *bdev) +LSM_HOOK(int, 0, bdev_setintegrity, struct block_device *bdev, + enum lsm_integrity_type type, const void *value, size_t size) diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index a2ade0ffe9e7..090d1d3e19fe 100644 --- a/include/linux/lsm_hooks.h +++ 
b/include/linux/lsm_hooks.h @@ -30,19 +30,47 @@ #include <linux/init.h> #include <linux/rculist.h> #include <linux/xattr.h> +#include <linux/static_call.h> +#include <linux/unroll.h> +#include <linux/jump_label.h> +#include <linux/lsm_count.h> union security_list_options { #define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__); #include "lsm_hook_defs.h" #undef LSM_HOOK + void *lsm_func_addr; }; -struct security_hook_heads { - #define LSM_HOOK(RET, DEFAULT, NAME, ...) struct hlist_head NAME; - #include "lsm_hook_defs.h" - #undef LSM_HOOK +/* + * @key: static call key as defined by STATIC_CALL_KEY + * @trampoline: static call trampoline as defined by STATIC_CALL_TRAMP + * @hl: The security_hook_list as initialized by the owning LSM. + * @active: Enabled when the static call has an LSM hook associated. + */ +struct lsm_static_call { + struct static_call_key *key; + void *trampoline; + struct security_hook_list *hl; + /* this needs to be true or false based on what the key defaults to */ + struct static_key_false *active; } __randomize_layout; +/* + * Table of the static calls for each LSM hook. + * Once the LSMs are initialized, their callbacks will be copied to these + * tables such that the calls are filled backwards (from last to first). + * This way, we can jump directly to the first used static call, and execute + * all of them after. This essentially makes the entry point + * dynamic to adapt the number of static calls to the number of callbacks. + */ +struct lsm_static_calls_table { + #define LSM_HOOK(RET, DEFAULT, NAME, ...) \ + struct lsm_static_call NAME[MAX_LSM_COUNT]; + #include <linux/lsm_hook_defs.h> + #undef LSM_HOOK +} __packed __randomize_layout; + /** * struct lsm_id - Identify a Linux Security Module. * @lsm: name of the LSM, must be approved by the LSM maintainers @@ -51,53 +79,45 @@ struct security_hook_heads { * Contains the information that identifies the LSM. */ struct lsm_id { - const char *name; - u64 id; + const char *name; + u64 id; }; /* * Security module hook list structure. * For use with generic list macros for common operations. + * + * struct security_hook_list - Contents of a cacheable, mappable object. + * @scalls: The beginning of the array of static calls assigned to this hook. + * @hook: The callback for the hook. + * @lsm: The name of the lsm that owns this hook. */ struct security_hook_list { - struct hlist_node list; - struct hlist_head *head; - union security_list_options hook; - const struct lsm_id *lsmid; + struct lsm_static_call *scalls; + union security_list_options hook; + const struct lsm_id *lsmid; } __randomize_layout; /* * Security blob size or offset data. */ struct lsm_blob_sizes { - int lbs_cred; - int lbs_file; - int lbs_inode; - int lbs_superblock; - int lbs_ipc; - int lbs_msg_msg; - int lbs_task; - int lbs_xattr_count; /* number of xattr slots in new_xattrs array */ + int lbs_cred; + int lbs_file; + int lbs_ib; + int lbs_inode; + int lbs_sock; + int lbs_superblock; + int lbs_ipc; + int lbs_key; + int lbs_msg_msg; + int lbs_perf_event; + int lbs_task; + int lbs_xattr_count; /* number of xattr slots in new_xattrs array */ + int lbs_tun_dev; + int lbs_bdev; }; -/** - * lsm_get_xattr_slot - Return the next available slot and increment the index - * @xattrs: array storing LSM-provided xattrs - * @xattr_count: number of already stored xattrs (updated) - * - * Retrieve the first available slot in the @xattrs array to fill with an xattr, - * and increment @xattr_count. 
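The per-hook static-call arrays above are sized by MAX_LSM_COUNT from the new lsm_count.h, which counts enabled LSMs entirely in the preprocessor: each enabled *_ENABLED macro expands to "1," and the trailing comma is cancelled by a skipped first argument before the arguments are counted. A reduced, self-contained sketch of the same trick (macro names here are illustrative and the counting limit is smaller than the kernel's):

    /*
     * Illustrative reduction of the lsm_count.h counting trick.
     * Two enabled entries expand to "1, 1,"; the trailing comma yields one
     * extra empty argument, which __COUNT_EXAMPLE() cancels by dropping its
     * first argument.
     */
    #define __COUNT_ARGS_EX(_0, _1, _2, _3, _4, _n, X...) _n
    #define COUNT_ARGS_EX(X...) __COUNT_ARGS_EX(, ##X, 4, 3, 2, 1, 0)

    #define __COUNT_EXAMPLE(skipped_arg, args...) COUNT_ARGS_EX(args)
    #define COUNT_EXAMPLE(args...) __COUNT_EXAMPLE(args)

    #define FOO_ENABLED 1,   /* enabled  */
    #define BAR_ENABLED 1,   /* enabled  */
    #define BAZ_ENABLED      /* disabled: expands to nothing */

    /* Expands to 2. */
    #define EXAMPLE_LSM_COUNT COUNT_EXAMPLE(FOO_ENABLED BAR_ENABLED BAZ_ENABLED)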
- * - * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise. - */ -static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs, - int *xattr_count) -{ - if (unlikely(!xattrs)) - return NULL; - return &xattrs[(*xattr_count)++]; -} - /* * LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void * LSM hooks (in include/linux/lsm_hook_defs.h). @@ -110,11 +130,11 @@ static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs, * care of the common case and reduces the amount of * text involved. */ -#define LSM_HOOK_INIT(HEAD, HOOK) \ - { .head = &security_hook_heads.HEAD, .hook = { .HEAD = HOOK } } - -extern struct security_hook_heads security_hook_heads; -extern char *lsm_names; +#define LSM_HOOK_INIT(NAME, HOOK) \ + { \ + .scalls = static_calls_table.NAME, \ + .hook = { .NAME = HOOK } \ + } extern void security_add_hooks(struct security_hook_list *hooks, int count, const struct lsm_id *lsmid); @@ -137,9 +157,6 @@ struct lsm_info { struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */ }; -extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; -extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; - #define DEFINE_LSM(lsm) \ static struct lsm_info __lsm_##lsm \ __used __section(".lsm_info.init") \ @@ -150,6 +167,28 @@ extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; __used __section(".early_lsm_info.init") \ __aligned(sizeof(unsigned long)) -extern int lsm_inode_alloc(struct inode *inode); +/* DO NOT tamper with these variables outside of the LSM framework */ +extern char *lsm_names; +extern struct lsm_static_calls_table static_calls_table __ro_after_init; +extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; +extern struct lsm_info __start_early_lsm_info[], __end_early_lsm_info[]; + +/** + * lsm_get_xattr_slot - Return the next available slot and increment the index + * @xattrs: array storing LSM-provided xattrs + * @xattr_count: number of already stored xattrs (updated) + * + * Retrieve the first available slot in the @xattrs array to fill with an xattr, + * and increment @xattr_count. + * + * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise. + */ +static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs, + int *xattr_count) +{ + if (unlikely(!xattrs)) + return NULL; + return &xattrs[(*xattr_count)++]; +} #endif /* ! __LINUX_LSM_HOOKS_H */ diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index a53ad4dabd7e..c2c11004085e 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -52,9 +52,9 @@ * bit in the node type. This is possible by using bit 1 to indicate if bit 2 * is part of the type or the slot. * - * Once the type is decided, the decision of an allocation range type or a range - * type is done by examining the immutable tree flag for the MAPLE_ALLOC_RANGE - * flag. + * Once the type is decided, the decision of an allocation range type or a + * range type is done by examining the immutable tree flag for the + * MT_FLAGS_ALLOC_RANGE flag. 
* * Node types: * 0x??1 = Root @@ -148,6 +148,18 @@ enum maple_type { maple_arange_64, }; +enum store_type { + wr_invalid, + wr_new_root, + wr_store_root, + wr_exact_fit, + wr_spanning_store, + wr_split_store, + wr_rebalance, + wr_append, + wr_node_store, + wr_slot_store, +}; /** * DOC: Maple tree flags @@ -436,6 +448,7 @@ struct ma_state { unsigned char offset; unsigned char mas_flags; unsigned char end; /* The end of the node */ + enum store_type store_type; /* The type of store needed for this operation */ }; struct ma_wr_state { @@ -477,6 +490,7 @@ struct ma_wr_state { .max = ULONG_MAX, \ .alloc = NULL, \ .mas_flags = 0, \ + .store_type = wr_invalid, \ } #define MA_WR_STATE(name, ma_state, wr_entry) \ diff --git a/include/linux/memblock.h b/include/linux/memblock.h index fc4d75c6cec3..673d5cae7c81 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h @@ -467,6 +467,7 @@ static inline __init_memblock bool memblock_bottom_up(void) phys_addr_t memblock_phys_mem_size(void); phys_addr_t memblock_reserved_size(void); +unsigned long memblock_estimated_nr_free_pages(void); phys_addr_t memblock_start_of_DRAM(void); phys_addr_t memblock_end_of_DRAM(void); void memblock_enforce_memory_limit(phys_addr_t memory_limit); diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 0e5bf25d324f..34d2da05f2f1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -57,7 +57,7 @@ enum memcg_memory_event { struct mem_cgroup_reclaim_cookie { pg_data_t *pgdat; - unsigned int generation; + int generation; }; #ifdef CONFIG_MEMCG @@ -70,6 +70,7 @@ struct mem_cgroup_id { }; struct memcg_vmstats_percpu; +struct memcg1_events_percpu; struct memcg_vmstats; struct lruvec_stats_percpu; struct lruvec_stats; @@ -77,7 +78,7 @@ struct lruvec_stats; struct mem_cgroup_reclaim_iter { struct mem_cgroup *position; /* scan generation, increased every round-trip */ - unsigned int generation; + atomic_t generation; }; /* @@ -193,6 +194,11 @@ struct mem_cgroup { struct page_counter memsw; /* v1 only */ }; + /* registered local peak watchers */ + struct list_head memory_peaks; + struct list_head swap_peaks; + spinlock_t peaks_lock; + /* Range enforcement for interrupt charges */ struct work_struct high_work; @@ -270,6 +276,8 @@ struct mem_cgroup { struct page_counter kmem; /* v1 only */ struct page_counter tcpmem; /* v1 only */ + struct memcg1_events_percpu __percpu *events_percpu; + unsigned long soft_limit; /* protected by memcg_oom_lock */ @@ -361,11 +369,11 @@ static inline bool folio_memcg_kmem(struct folio *folio); * After the initialization objcg->memcg is always pointing at * a valid memcg, but can be atomically swapped to the parent memcg. * - * The caller must ensure that the returned memcg won't be released: - * e.g. acquire the rcu_read_lock or css_set_lock. + * The caller must ensure that the returned memcg won't be released. */ static inline struct mem_cgroup *obj_cgroup_memcg(struct obj_cgroup *objcg) { + lockdep_assert_once(rcu_read_lock_held() || lockdep_is_held(&cgroup_mutex)); return READ_ONCE(objcg->memcg); } @@ -439,6 +447,19 @@ static inline struct mem_cgroup *folio_memcg(struct folio *folio) return __folio_memcg(folio); } +/* + * folio_memcg_charged - If a folio is charged to a memory cgroup. + * @folio: Pointer to the folio. + * + * Returns true if folio is charged to a memory cgroup, otherwise returns false. 
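obj_cgroup_memcg() above now states its locking contract with a lockdep assertion rather than prose alone. A hedged sketch of the caller pattern this enforces (the function name is invented for illustration):

    /*
     * Illustrative only: hold rcu_read_lock() (or cgroup_mutex) across both
     * the obj_cgroup_memcg() call and every use of the returned memcg.
     */
    static void example_use_objcg_memcg(struct obj_cgroup *objcg)
    {
            struct mem_cgroup *memcg;

            rcu_read_lock();
            memcg = obj_cgroup_memcg(objcg);
            /* ... use memcg only while still under rcu_read_lock() ... */
            rcu_read_unlock();
    }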
+ */ +static inline bool folio_memcg_charged(struct folio *folio) +{ + if (folio_memcg_kmem(folio)) + return __folio_objcg(folio) != NULL; + return __folio_memcg(folio) != NULL; +} + /** * folio_memcg_rcu - Locklessly get the memory cgroup associated with a folio. * @folio: Pointer to the folio. @@ -455,7 +476,6 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) unsigned long memcg_data = READ_ONCE(folio->memcg_data); VM_BUG_ON_FOLIO(folio_test_slab(folio), folio); - WARN_ON_ONCE(!rcu_read_lock_held()); if (memcg_data & MEMCG_DATA_KMEM) { struct obj_cgroup *objcg; @@ -464,6 +484,8 @@ static inline struct mem_cgroup *folio_memcg_rcu(struct folio *folio) return obj_cgroup_memcg(objcg); } + WARN_ON_ONCE(!rcu_read_lock_held()); + return (struct mem_cgroup *)(memcg_data & ~OBJEXTS_FLAGS_MASK); } @@ -677,7 +699,8 @@ int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp, int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm, gfp_t gfp, swp_entry_t entry); -void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry); + +void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr_pages); void __mem_cgroup_uncharge(struct folio *folio); @@ -762,6 +785,8 @@ struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm); struct mem_cgroup *get_mem_cgroup_from_current(void); +struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio); + struct lruvec *folio_lruvec_lock(struct folio *folio); struct lruvec *folio_lruvec_lock_irq(struct folio *folio); struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio, @@ -1006,8 +1031,8 @@ static inline void count_memcg_folio_events(struct folio *folio, count_memcg_events(memcg, idx, nr); } -static inline void count_memcg_event_mm(struct mm_struct *mm, - enum vm_event_item idx) +static inline void count_memcg_events_mm(struct mm_struct *mm, + enum vm_event_item idx, unsigned long count) { struct mem_cgroup *memcg; @@ -1017,10 +1042,16 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, rcu_read_lock(); memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); if (likely(memcg)) - count_memcg_events(memcg, idx, 1); + count_memcg_events(memcg, idx, count); rcu_read_unlock(); } +static inline void count_memcg_event_mm(struct mm_struct *mm, + enum vm_event_item idx) +{ + count_memcg_events_mm(mm, idx, 1); +} + static inline void memcg_memory_event(struct mem_cgroup *memcg, enum memcg_memory_event event) { @@ -1176,7 +1207,7 @@ static inline int mem_cgroup_swapin_charge_folio(struct folio *folio, return 0; } -static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry) +static inline void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry, unsigned int nr) { } @@ -1240,6 +1271,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_current(void) return NULL; } +static inline struct mem_cgroup *get_mem_cgroup_from_folio(struct folio *folio) +{ + return NULL; +} + static inline struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css) { @@ -1462,6 +1498,11 @@ static inline void count_memcg_folio_events(struct folio *folio, { } +static inline void count_memcg_events_mm(struct mm_struct *mm, + enum vm_event_item idx, unsigned long count) +{ +} + static inline void count_memcg_event_mm(struct mm_struct *mm, enum vm_event_item idx) { @@ -1717,7 +1758,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg) return memcg ? 
memcg->kmemcg_id : -1; } -struct mem_cgroup *mem_cgroup_from_obj(void *p); struct mem_cgroup *mem_cgroup_from_slab_obj(void *p); static inline void count_objcg_event(struct obj_cgroup *objcg, @@ -1780,11 +1820,6 @@ static inline int memcg_kmem_id(struct mem_cgroup *memcg) return -1; } -static inline struct mem_cgroup *mem_cgroup_from_obj(void *p) -{ - return NULL; -} - static inline struct mem_cgroup *mem_cgroup_from_slab_obj(void *p) { return NULL; diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index ebe876930e78..b27ddce5d324 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -16,54 +16,6 @@ struct resource; struct vmem_altmap; struct dev_pagemap; -#ifdef CONFIG_HAVE_ARCH_NODEDATA_EXTENSION -/* - * For supporting node-hotadd, we have to allocate a new pgdat. - * - * If an arch has generic style NODE_DATA(), - * node_data[nid] = kzalloc() works well. But it depends on the architecture. - * - * In general, generic_alloc_nodedata() is used. - * - */ -extern pg_data_t *arch_alloc_nodedata(int nid); -extern void arch_refresh_nodedata(int nid, pg_data_t *pgdat); - -#else /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ - -#define arch_alloc_nodedata(nid) generic_alloc_nodedata(nid) - -#ifdef CONFIG_NUMA -/* - * XXX: node aware allocation can't work well to get new node's memory at this time. - * Because, pgdat for the new node is not allocated/initialized yet itself. - * To use new node's memory, more consideration will be necessary. - */ -#define generic_alloc_nodedata(nid) \ -({ \ - memblock_alloc(sizeof(*pgdat), SMP_CACHE_BYTES); \ -}) - -extern pg_data_t *node_data[]; -static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ - node_data[nid] = pgdat; -} - -#else /* !CONFIG_NUMA */ - -/* never called */ -static inline pg_data_t *generic_alloc_nodedata(int nid) -{ - BUG(); - return NULL; -} -static inline void arch_refresh_nodedata(int nid, pg_data_t *pgdat) -{ -} -#endif /* CONFIG_NUMA */ -#endif /* CONFIG_HAVE_ARCH_NODEDATA_EXTENSION */ - #ifdef CONFIG_MEMORY_HOTPLUG struct page *pfn_to_online_page(unsigned long pfn); diff --git a/include/linux/mfd/88pm80x.h b/include/linux/mfd/88pm80x.h index def5df6e74bf..551ef1c367d6 100644 --- a/include/linux/mfd/88pm80x.h +++ b/include/linux/mfd/88pm80x.h @@ -294,7 +294,7 @@ struct pm80x_chip { struct i2c_client *client; struct i2c_client *companion; struct regmap *regmap; - struct regmap_irq_chip *regmap_irq_chip; + const struct regmap_irq_chip *regmap_irq_chip; struct regmap_irq_chip_data *irq_data; int type; int irq; diff --git a/include/linux/mfd/adp5585.h b/include/linux/mfd/adp5585.h new file mode 100644 index 000000000000..016033cd68e4 --- /dev/null +++ b/include/linux/mfd/adp5585.h @@ -0,0 +1,126 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Analog Devices ADP5585 I/O expander, PWM controller and keypad controller + * + * Copyright 2022 NXP + * Copyright 2024 Ideas on Board Oy + */ + +#ifndef __MFD_ADP5585_H_ +#define __MFD_ADP5585_H_ + +#include <linux/bits.h> + +#define ADP5585_ID 0x00 +#define ADP5585_MAN_ID_VALUE 0x20 +#define ADP5585_MAN_ID_MASK GENMASK(7, 4) +#define ADP5585_INT_STATUS 0x01 +#define ADP5585_STATUS 0x02 +#define ADP5585_FIFO_1 0x03 +#define ADP5585_FIFO_2 0x04 +#define ADP5585_FIFO_3 0x05 +#define ADP5585_FIFO_4 0x06 +#define ADP5585_FIFO_5 0x07 +#define ADP5585_FIFO_6 0x08 +#define ADP5585_FIFO_7 0x09 +#define ADP5585_FIFO_8 0x0a +#define ADP5585_FIFO_9 0x0b +#define ADP5585_FIFO_10 0x0c +#define ADP5585_FIFO_11 0x0d +#define ADP5585_FIFO_12 
0x0e +#define ADP5585_FIFO_13 0x0f +#define ADP5585_FIFO_14 0x10 +#define ADP5585_FIFO_15 0x11 +#define ADP5585_FIFO_16 0x12 +#define ADP5585_GPI_INT_STAT_A 0x13 +#define ADP5585_GPI_INT_STAT_B 0x14 +#define ADP5585_GPI_STATUS_A 0x15 +#define ADP5585_GPI_STATUS_B 0x16 +#define ADP5585_RPULL_CONFIG_A 0x17 +#define ADP5585_RPULL_CONFIG_B 0x18 +#define ADP5585_RPULL_CONFIG_C 0x19 +#define ADP5585_RPULL_CONFIG_D 0x1a +#define ADP5585_Rx_PULL_CFG_PU_300K 0 +#define ADP5585_Rx_PULL_CFG_PD_300K 1 +#define ADP5585_Rx_PULL_CFG_PU_100K 2 +#define ADP5585_Rx_PULL_CFG_DISABLE 3 +#define ADP5585_Rx_PULL_CFG_MASK 3 +#define ADP5585_GPI_INT_LEVEL_A 0x1b +#define ADP5585_GPI_INT_LEVEL_B 0x1c +#define ADP5585_GPI_EVENT_EN_A 0x1d +#define ADP5585_GPI_EVENT_EN_B 0x1e +#define ADP5585_GPI_INTERRUPT_EN_A 0x1f +#define ADP5585_GPI_INTERRUPT_EN_B 0x20 +#define ADP5585_DEBOUNCE_DIS_A 0x21 +#define ADP5585_DEBOUNCE_DIS_B 0x22 +#define ADP5585_GPO_DATA_OUT_A 0x23 +#define ADP5585_GPO_DATA_OUT_B 0x24 +#define ADP5585_GPO_OUT_MODE_A 0x25 +#define ADP5585_GPO_OUT_MODE_B 0x26 +#define ADP5585_GPIO_DIRECTION_A 0x27 +#define ADP5585_GPIO_DIRECTION_B 0x28 +#define ADP5585_RESET1_EVENT_A 0x29 +#define ADP5585_RESET1_EVENT_B 0x2a +#define ADP5585_RESET1_EVENT_C 0x2b +#define ADP5585_RESET2_EVENT_A 0x2c +#define ADP5585_RESET2_EVENT_B 0x2d +#define ADP5585_RESET_CFG 0x2e +#define ADP5585_PWM_OFFT_LOW 0x2f +#define ADP5585_PWM_OFFT_HIGH 0x30 +#define ADP5585_PWM_ONT_LOW 0x31 +#define ADP5585_PWM_ONT_HIGH 0x32 +#define ADP5585_PWM_CFG 0x33 +#define ADP5585_PWM_IN_AND BIT(2) +#define ADP5585_PWM_MODE BIT(1) +#define ADP5585_PWM_EN BIT(0) +#define ADP5585_LOGIC_CFG 0x34 +#define ADP5585_LOGIC_FF_CFG 0x35 +#define ADP5585_LOGIC_INT_EVENT_EN 0x36 +#define ADP5585_POLL_PTIME_CFG 0x37 +#define ADP5585_PIN_CONFIG_A 0x38 +#define ADP5585_PIN_CONFIG_B 0x39 +#define ADP5585_PIN_CONFIG_C 0x3a +#define ADP5585_PULL_SELECT BIT(7) +#define ADP5585_C4_EXTEND_CFG_GPIO11 (0U << 6) +#define ADP5585_C4_EXTEND_CFG_RESET2 (1U << 6) +#define ADP5585_C4_EXTEND_CFG_MASK GENMASK(6, 6) +#define ADP5585_R4_EXTEND_CFG_GPIO5 (0U << 5) +#define ADP5585_R4_EXTEND_CFG_RESET1 (1U << 5) +#define ADP5585_R4_EXTEND_CFG_MASK GENMASK(5, 5) +#define ADP5585_R3_EXTEND_CFG_GPIO4 (0U << 2) +#define ADP5585_R3_EXTEND_CFG_LC (1U << 2) +#define ADP5585_R3_EXTEND_CFG_PWM_OUT (2U << 2) +#define ADP5585_R3_EXTEND_CFG_MASK GENMASK(3, 2) +#define ADP5585_R0_EXTEND_CFG_GPIO1 (0U << 0) +#define ADP5585_R0_EXTEND_CFG_LY (1U << 0) +#define ADP5585_R0_EXTEND_CFG_MASK GENMASK(0, 0) +#define ADP5585_GENERAL_CFG 0x3b +#define ADP5585_OSC_EN BIT(7) +#define ADP5585_OSC_FREQ_50KHZ (0U << 5) +#define ADP5585_OSC_FREQ_100KHZ (1U << 5) +#define ADP5585_OSC_FREQ_200KHZ (2U << 5) +#define ADP5585_OSC_FREQ_500KHZ (3U << 5) +#define ADP5585_OSC_FREQ_MASK GENMASK(6, 5) +#define ADP5585_INT_CFG BIT(1) +#define ADP5585_RST_CFG BIT(0) +#define ADP5585_INT_EN 0x3c + +#define ADP5585_MAX_REG ADP5585_INT_EN + +/* + * Bank 0 covers pins "GPIO 1/R0" to "GPIO 6/R5", numbered 0 to 5 by the + * driver, and bank 1 covers pins "GPIO 7/C0" to "GPIO 11/C4", numbered 6 to + * 10. Some variants of the ADP5585 don't support "GPIO 6/R5". As the driver + * uses identical GPIO numbering for all variants to avoid confusion, GPIO 5 is + * marked as reserved in the device tree for variants that don't support it. + */ +#define ADP5585_BANK(n) ((n) >= 6 ? 1 : 0) +#define ADP5585_BIT(n) ((n) >= 6 ? 
BIT((n) - 6) : BIT(n)) + +struct regmap; + +struct adp5585_dev { + struct regmap *regmap; +}; + +#endif diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 8c0a33a2e9ce..f4dfc1871a95 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h @@ -115,6 +115,16 @@ enum axp20x_variants { #define AXP313A_IRQ_STATE 0x21 #define AXP717_ON_INDICATE 0x00 +#define AXP717_PMU_STATUS_2 0x01 +#define AXP717_BC_DETECT 0x05 +#define AXP717_PMU_FAULT 0x08 +#define AXP717_MODULE_EN_CONTROL_1 0x0b +#define AXP717_MIN_SYS_V_CONTROL 0x15 +#define AXP717_INPUT_VOL_LIMIT_CTRL 0x16 +#define AXP717_INPUT_CUR_LIMIT_CTRL 0x17 +#define AXP717_MODULE_EN_CONTROL_2 0x19 +#define AXP717_BOOST_CONTROL 0x1e +#define AXP717_VSYS_V_POWEROFF 0x24 #define AXP717_IRQ0_EN 0x40 #define AXP717_IRQ1_EN 0x41 #define AXP717_IRQ2_EN 0x42 @@ -125,6 +135,9 @@ enum axp20x_variants { #define AXP717_IRQ2_STATE 0x4a #define AXP717_IRQ3_STATE 0x4b #define AXP717_IRQ4_STATE 0x4c +#define AXP717_ICC_CHG_SET 0x62 +#define AXP717_ITERM_CHG_SET 0x63 +#define AXP717_CV_CHG_SET 0x64 #define AXP717_DCDC_OUTPUT_CONTROL 0x80 #define AXP717_DCDC1_CONTROL 0x83 #define AXP717_DCDC2_CONTROL 0x84 @@ -145,6 +158,19 @@ enum axp20x_variants { #define AXP717_CLDO3_CONTROL 0x9d #define AXP717_CLDO4_CONTROL 0x9e #define AXP717_CPUSLDO_CONTROL 0x9f +#define AXP717_BATT_PERCENT_DATA 0xa4 +#define AXP717_ADC_CH_EN_CONTROL 0xc0 +#define AXP717_BATT_V_H 0xc4 +#define AXP717_BATT_V_L 0xc5 +#define AXP717_VBUS_V_H 0xc6 +#define AXP717_VBUS_V_L 0xc7 +#define AXP717_VSYS_V_H 0xc8 +#define AXP717_VSYS_V_L 0xc9 +#define AXP717_BATT_CHRG_I_H 0xca +#define AXP717_BATT_CHRG_I_L 0xcb +#define AXP717_ADC_DATA_SEL 0xcd +#define AXP717_ADC_DATA_H 0xce +#define AXP717_ADC_DATA_L 0xcf #define AXP806_STARTUP_SRC 0x00 #define AXP806_CHIP_ID 0x03 @@ -484,6 +510,7 @@ enum { AXP717_CLDO3, AXP717_CLDO4, AXP717_CPUSLDO, + AXP717_BOOST, AXP717_REG_ID_MAX, }; diff --git a/include/linux/mfd/ds1wm.h b/include/linux/mfd/ds1wm.h deleted file mode 100644 index 43dfca1c9702..000000000000 --- a/include/linux/mfd/ds1wm.h +++ /dev/null @@ -1,29 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* MFD cell driver data for the DS1WM driver - * - * to be defined in the MFD device that is - * using this driver for one of his sub devices - */ - -struct ds1wm_driver_data { - int active_high; - int clock_rate; - /* in milliseconds, the amount of time to - * sleep following a reset pulse. Zero - * should work if your bus devices recover - * time respects the 1-wire spec since the - * ds1wm implements the precise timings of - * a reset pulse/presence detect sequence. - */ - unsigned int reset_recover_delay; - - /* Say 1 here for big endian Hardware - * (only relevant with bus-shift > 0 - */ - bool is_hw_big_endian; - - /* left shift of register number to get register address offsett. 
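The ADP5585_BANK() and ADP5585_BIT() helpers above split the driver's linear GPIO numbering into a register bank and a bit within that bank, following the numbering described in the comment. A worked example (illustrative values only):

    /* Illustrative only: mapping of driver GPIO numbers to bank/bit. */
    static void example_adp5585_mapping(void)
    {
            unsigned int bank1 = ADP5585_BANK(8);  /* GPIO 8 ("GPIO 9/C2"): (8 >= 6) -> 1 */
            unsigned int bit1  = ADP5585_BIT(8);   /* BIT(8 - 6) == BIT(2)                */
            unsigned int bank0 = ADP5585_BANK(3);  /* GPIO 3 ("GPIO 4/R3"): -> 0          */
            unsigned int bit0  = ADP5585_BIT(3);   /* BIT(3)                              */
    }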
- * Only 0,1,2 allowed for 8,16 or 32 bit bus width respectively - */ - unsigned int bus_shift; -}; diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h index 54444ff2a5de..20c5e02ed9da 100644 --- a/include/linux/mfd/max77693-private.h +++ b/include/linux/mfd/max77693-private.h @@ -217,6 +217,10 @@ enum max77693_charger_battery_state { #define CHG_CNFG_01_CHGRSTRT_MASK (0x3 << CHG_CNFG_01_CHGRSTRT_SHIFT) #define CHG_CNFG_01_PQEN_MAKS BIT(CHG_CNFG_01_PQEN_SHIFT) +/* MAX77693_CHG_REG_CHG_CNFG_02 register */ +#define CHG_CNFG_02_CC_SHIFT 0 +#define CHG_CNFG_02_CC_MASK 0x3F + /* MAX77693_CHG_REG_CHG_CNFG_03 register */ #define CHG_CNFG_03_TOITH_SHIFT 0 #define CHG_CNFG_03_TOTIME_SHIFT 3 @@ -244,6 +248,7 @@ enum max77693_charger_battery_state { #define CHG_CNFG_12_VCHGINREG_MASK (0x3 << CHG_CNFG_12_VCHGINREG_SHIFT) /* MAX77693 CHG_CNFG_09 Register */ +#define CHG_CNFG_09_CHGIN_ILIM_SHIFT 0 #define CHG_CNFG_09_CHGIN_ILIM_MASK 0x7F /* MAX77693 CHG_CTRL Register */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index 644be30b69c8..002e49b2ebd9 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -70,6 +70,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, unsigned int *ret_succeeded); struct folio *alloc_migration_target(struct folio *src, unsigned long private); bool isolate_movable_page(struct page *page, isolate_mode_t mode); +bool isolate_folio_to_list(struct folio *folio, struct list_head *list); int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src); @@ -91,6 +92,8 @@ static inline struct folio *alloc_migration_target(struct folio *src, { return NULL; } static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode) { return false; } +static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list) + { return false; } static inline int migrate_huge_page_move_mapping(struct address_space *mapping, struct folio *dst, struct folio *src) diff --git a/include/linux/mii.h b/include/linux/mii.h index d5a959ce4877..b8f26d4513c3 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h @@ -140,7 +140,7 @@ static inline u32 ethtool_adv_to_mii_adv_t(u32 ethadv) * settings to phy autonegotiation advertisements for the * MII_ADVERTISE register. */ -static inline u32 linkmode_adv_to_mii_adv_t(unsigned long *advertising) +static inline u32 linkmode_adv_to_mii_adv_t(const unsigned long *advertising) { u32 result = 0; @@ -215,7 +215,8 @@ static inline u32 ethtool_adv_to_mii_ctrl1000_t(u32 ethadv) * settings to phy autonegotiation advertisements for the * MII_CTRL1000 register when in 1000T mode. */ -static inline u32 linkmode_adv_to_mii_ctrl1000_t(unsigned long *advertising) +static inline u32 +linkmode_adv_to_mii_ctrl1000_t(const unsigned long *advertising) { u32 result = 0; @@ -453,7 +454,7 @@ static inline void mii_ctrl1000_mod_linkmode_adv_t(unsigned long *advertising, * A small helper function that translates linkmode advertising to LVL * pause capabilities. 
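The mii.h translation helpers in this hunk now take const advertising masks, so read-only callers no longer have to drop the qualifier. A minimal sketch in kernel context (wrapper name invented for illustration):

    /* Illustrative only: a const linkmode mask can be passed directly. */
    static u32 example_adv_to_mii(const unsigned long *advertising)
    {
            return linkmode_adv_to_mii_adv_t(advertising);
    }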
*/ -static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) +static inline u32 linkmode_adv_to_lcl_adv_t(const unsigned long *advertising) { u32 lcl_adv = 0; diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index ba875a619b97..cc647992f3d1 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h @@ -211,6 +211,7 @@ enum { enum { MLX5_PFAULT_SUBTYPE_WQE = 0, MLX5_PFAULT_SUBTYPE_RDMA = 1, + MLX5_PFAULT_SUBTYPE_MEMORY = 2, }; enum wqe_page_fault_type { @@ -370,6 +371,7 @@ enum mlx5_driver_event { MLX5_DRIVER_EVENT_SF_PEER_DEVLINK, MLX5_DRIVER_EVENT_AFFILIATION_DONE, MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, + MLX5_DRIVER_EVENT_ACTIVE_BACKUP_LAG_CHANGE_LOWERSTATE, }; enum { @@ -646,10 +648,11 @@ struct mlx5_eqe_page_req { __be32 rsvd1[5]; }; +#define MEMORY_SCHEME_PAGE_FAULT_GRANULARITY 4096 struct mlx5_eqe_page_fault { - __be32 bytes_committed; union { struct { + __be32 bytes_committed; u16 reserved1; __be16 wqe_index; u16 reserved2; @@ -659,6 +662,7 @@ struct mlx5_eqe_page_fault { __be32 pftype_wq; } __packed wqe; struct { + __be32 bytes_committed; __be32 r_key; u16 reserved1; __be16 packet_length; @@ -666,6 +670,23 @@ struct mlx5_eqe_page_fault { __be64 rdma_va; __be32 pftype_token; } __packed rdma; + struct { + u8 flags; + u8 reserved1; + __be16 post_demand_fault_pages; + __be16 pre_demand_fault_pages; + __be16 token47_32; + __be32 token31_0; + /* + * FW changed from specifying the fault size in byte + * count to 4k pages granularity. The size specified + * in pages uses bits 31:12, to keep backward + * compatibility. + */ + __be32 demand_fault_pages; + __be32 mkey; + __be64 va; + } __packed memory; } __packed; } __packed; @@ -1243,7 +1264,8 @@ enum mlx5_pcam_feature_groups { enum mlx5_mcam_reg_groups { MLX5_MCAM_REGS_FIRST_128 = 0x0, MLX5_MCAM_REGS_0x9100_0x917F = 0x2, - MLX5_MCAM_REGS_NUM = 0x3, + MLX5_MCAM_REGS_0x9180_0x91FF = 0x3, + MLX5_MCAM_REGS_NUM = 0x4, }; enum mlx5_mcam_feature_groups { @@ -1369,6 +1391,14 @@ enum mlx5_qcam_feature_groups { #define MLX5_CAP_ODP(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap) +#define MLX5_CAP_ODP_SCHEME(mdev, cap) \ + (MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \ + mem_page_fault) ? 
\ + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \ + memory_page_fault_scheme_cap.cap) : \ + MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, \ + transport_page_fault_scheme_cap.cap)) + #define MLX5_CAP_ODP_MAX(mdev, cap)\ MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap) @@ -1392,6 +1422,10 @@ enum mlx5_qcam_feature_groups { MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \ mng_access_reg_cap_mask.access_regs2.reg) +#define MLX5_CAP_MCAM_REG3(mdev, reg) \ + MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9180_0x91FF], \ + mng_access_reg_cap_mask.access_regs3.reg) + #define MLX5_CAP_MCAM_FEATURE(mdev, fld) \ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld) @@ -1444,6 +1478,7 @@ enum { MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4, MLX5_CMD_STAT_BAD_RES_ERR = 0x5, MLX5_CMD_STAT_RES_BUSY = 0x6, + MLX5_CMD_STAT_NOT_READY = 0x7, MLX5_CMD_STAT_LIM_ERR = 0x8, MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9, MLX5_CMD_STAT_IX_ERR = 0xa, diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a96438ded15f..e23c692a34c7 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -159,6 +159,8 @@ enum { MLX5_REG_MSECQ = 0x9155, MLX5_REG_MSEES = 0x9156, MLX5_REG_MIRC = 0x9162, + MLX5_REG_MTPTM = 0x9180, + MLX5_REG_MTCTR = 0x9181, MLX5_REG_SBCAM = 0xB01F, MLX5_REG_RESOURCE_DUMP = 0xC000, MLX5_REG_DTOR = 0xC00E, @@ -643,6 +645,7 @@ struct mlx5_priv { struct mlx5_sf_hw_table *sf_hw_table; struct mlx5_sf_table *sf_table; #endif + struct blocking_notifier_head lag_nh; }; enum mlx5_device_state { @@ -1181,7 +1184,6 @@ bool mlx5_lag_mode_is_hash(struct mlx5_core_dev *dev); bool mlx5_lag_is_master(struct mlx5_core_dev *dev); bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev); bool mlx5_lag_is_mpesw(struct mlx5_core_dev *dev); -struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); u8 mlx5_lag_get_slave_port(struct mlx5_core_dev *dev, struct net_device *slave); int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index 3fb428ce7d1c..b744e554f014 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h @@ -342,4 +342,7 @@ void mlx5_packet_reformat_dealloc(struct mlx5_core_dev *dev, struct mlx5_pkt_reformat *reformat); u32 mlx5_flow_table_id(struct mlx5_flow_table *ft); + +struct mlx5_flow_root_namespace * +mlx5_get_root_namespace(struct mlx5_core_dev *dev, enum mlx5_flow_namespace_type ns_type); #endif diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index cab228cf51c6..97f6de69f616 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -80,23 +80,15 @@ enum { enum { MLX5_OBJ_TYPE_SW_ICM = 0x0008, - MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23, -}; - -enum { - MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), - MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), - MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13), - MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT = - (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT), - MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39), -}; - -enum { MLX5_OBJ_TYPE_GENEVE_TLV_OPT = 0x000b, MLX5_OBJ_TYPE_VIRTIO_NET_Q = 0x000d, MLX5_OBJ_TYPE_VIRTIO_Q_COUNTERS = 0x001c, MLX5_OBJ_TYPE_MATCH_DEFINER = 0x0018, + MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT = 0x23, + MLX5_OBJ_TYPE_STC = 0x0040, + MLX5_OBJ_TYPE_RTC = 0x0041, + MLX5_OBJ_TYPE_STE = 0x0042, + MLX5_OBJ_TYPE_MODIFY_HDR_PATTERN = 0x0043, 
MLX5_OBJ_TYPE_PAGE_TRACK = 0x46, MLX5_OBJ_TYPE_MKEY = 0xff01, MLX5_OBJ_TYPE_QP = 0xff02, @@ -112,6 +104,16 @@ enum { MLX5_OBJ_TYPE_RQT = 0xff0e, MLX5_OBJ_TYPE_FLOW_COUNTER = 0xff0f, MLX5_OBJ_TYPE_CQ = 0xff10, + MLX5_OBJ_TYPE_FT_ALIAS = 0xff15, +}; + +enum { + MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM), + MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11), + MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13), + MLX5_GENERAL_OBJ_TYPES_CAP_HEADER_MODIFY_ARGUMENT = + (1ULL << MLX5_OBJ_TYPE_HEADER_MODIFY_ARGUMENT), + MLX5_GENERAL_OBJ_TYPES_CAP_MACSEC_OFFLOAD = (1ULL << 39), }; enum { @@ -313,6 +315,8 @@ enum { MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e, MLX5_CMD_OP_SYNC_CRYPTO = 0xb12, MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16, + MLX5_CMD_OP_GENERATE_WQE = 0xb17, + MLX5_CMD_OPCODE_QUERY_VUID = 0xb22, MLX5_CMD_OP_MAX }; @@ -485,7 +489,13 @@ struct mlx5_ifc_flow_table_prop_layout_bits { u8 reserved_at_66[0x2]; u8 reformat_add_macsec[0x1]; u8 reformat_remove_macsec[0x1]; - u8 reserved_at_6a[0xe]; + u8 reparse[0x1]; + u8 reserved_at_6b[0x1]; + u8 cross_vhca_object[0x1]; + u8 reformat_l2_to_l3_audp_tunnel[0x1]; + u8 reformat_l3_audp_tunnel_to_l2[0x1]; + u8 ignore_flow_level_rtc_valid[0x1]; + u8 reserved_at_70[0x8]; u8 log_max_ft_num[0x8]; u8 reserved_at_80[0x10]; @@ -522,7 +532,15 @@ struct mlx5_ifc_ipv6_layout_bits { u8 ipv6[16][0x8]; }; +struct mlx5_ifc_ipv6_simple_layout_bits { + u8 ipv6_127_96[0x20]; + u8 ipv6_95_64[0x20]; + u8 ipv6_63_32[0x20]; + u8 ipv6_31_0[0x20]; +}; + union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits { + struct mlx5_ifc_ipv6_simple_layout_bits ipv6_simple_layout; struct mlx5_ifc_ipv6_layout_bits ipv6_layout; struct mlx5_ifc_ipv4_layout_bits ipv4_layout; u8 reserved_at_0[0x80]; @@ -911,7 +929,9 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_at_8[0x5]; u8 fdb_uplink_hairpin[0x1]; u8 fdb_multi_path_any_table_limit_regc[0x1]; - u8 reserved_at_f[0x3]; + u8 reserved_at_f[0x1]; + u8 fdb_dynamic_tunnel[0x1]; + u8 reserved_at_11[0x1]; u8 fdb_multi_path_any_table[0x1]; u8 reserved_at_13[0x2]; u8 fdb_modify_header_fwd_to_table[0x1]; @@ -950,6 +970,73 @@ struct mlx5_ifc_flow_table_eswitch_cap_bits { u8 reserved_at_1900[0x6700]; }; +struct mlx5_ifc_wqe_based_flow_table_cap_bits { + u8 reserved_at_0[0x3]; + u8 log_max_num_ste[0x5]; + u8 reserved_at_8[0x3]; + u8 log_max_num_stc[0x5]; + u8 reserved_at_10[0x3]; + u8 log_max_num_rtc[0x5]; + u8 reserved_at_18[0x3]; + u8 log_max_num_header_modify_pattern[0x5]; + + u8 rtc_hash_split_table[0x1]; + u8 rtc_linear_lookup_table[0x1]; + u8 reserved_at_22[0x1]; + u8 stc_alloc_log_granularity[0x5]; + u8 reserved_at_28[0x3]; + u8 stc_alloc_log_max[0x5]; + u8 reserved_at_30[0x3]; + u8 ste_alloc_log_granularity[0x5]; + u8 reserved_at_38[0x3]; + u8 ste_alloc_log_max[0x5]; + + u8 reserved_at_40[0xb]; + u8 rtc_reparse_mode[0x5]; + u8 reserved_at_50[0x3]; + u8 rtc_index_mode[0x5]; + u8 reserved_at_58[0x3]; + u8 rtc_log_depth_max[0x5]; + + u8 reserved_at_60[0x10]; + u8 ste_format[0x10]; + + u8 stc_action_type[0x80]; + + u8 header_insert_type[0x10]; + u8 header_remove_type[0x10]; + + u8 trivial_match_definer[0x20]; + + u8 reserved_at_140[0x1b]; + u8 rtc_max_num_hash_definer_gen_wqe[0x5]; + + u8 reserved_at_160[0x18]; + u8 access_index_mode[0x8]; + + u8 reserved_at_180[0x10]; + u8 ste_format_gen_wqe[0x10]; + + u8 linear_match_definer_reg_c3[0x20]; + + u8 fdb_jump_to_tir_stc[0x1]; + u8 reserved_at_1c1[0x1f]; +}; + +struct mlx5_ifc_esw_cap_bits { + u8 reserved_at_0[0x1d]; + u8 merged_eswitch[0x1]; + u8 
reserved_at_1e[0x2]; + + u8 reserved_at_20[0x40]; + + u8 esw_manager_vport_number_valid[0x1]; + u8 reserved_at_61[0xf]; + u8 esw_manager_vport_number[0x10]; + + u8 reserved_at_80[0x780]; +}; + enum { MLX5_COUNTER_SOURCE_ESWITCH = 0x0, MLX5_COUNTER_FLOW_ESWITCH = 0x1, @@ -1027,7 +1114,8 @@ struct mlx5_ifc_qos_cap_bits { u8 max_tsar_bw_share[0x20]; - u8 reserved_at_100[0x20]; + u8 nic_element_type[0x10]; + u8 nic_tsar_type[0x10]; u8 reserved_at_120[0x3]; u8 log_meter_aso_granularity[0x5]; @@ -1325,11 +1413,13 @@ struct mlx5_ifc_atomic_caps_bits { u8 reserved_at_e0[0x720]; }; -struct mlx5_ifc_odp_cap_bits { +struct mlx5_ifc_odp_scheme_cap_bits { u8 reserved_at_0[0x40]; u8 sig[0x1]; - u8 reserved_at_41[0x1f]; + u8 reserved_at_41[0x4]; + u8 page_prefetch[0x1]; + u8 reserved_at_46[0x1a]; u8 reserved_at_60[0x20]; @@ -1343,7 +1433,20 @@ struct mlx5_ifc_odp_cap_bits { struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps; - u8 reserved_at_120[0x6E0]; + u8 reserved_at_120[0xe0]; +}; + +struct mlx5_ifc_odp_cap_bits { + struct mlx5_ifc_odp_scheme_cap_bits transport_page_fault_scheme_cap; + + struct mlx5_ifc_odp_scheme_cap_bits memory_page_fault_scheme_cap; + + u8 reserved_at_400[0x200]; + + u8 mem_page_fault[0x1]; + u8 reserved_at_601[0x1f]; + + u8 reserved_at_620[0x1e0]; }; struct mlx5_ifc_tls_cap_bits { @@ -1443,9 +1546,13 @@ enum { }; enum { + MLX5_FLEX_IPV4_OVER_VXLAN_ENABLED = 1 << 0, + MLX5_FLEX_IPV6_OVER_VXLAN_ENABLED = 1 << 1, + MLX5_FLEX_IPV6_OVER_IP_ENABLED = 1 << 2, MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3, MLX5_FLEX_PARSER_MPLS_OVER_GRE_ENABLED = 1 << 4, MLX5_FLEX_PARSER_MPLS_OVER_UDP_ENABLED = 1 << 5, + MLX5_FLEX_P_BIT_VXLAN_GPE_ENABLED = 1 << 6, MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7, MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8, MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9, @@ -1650,7 +1757,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 pci_sync_for_fw_update_event[0x1]; u8 reserved_at_1f2[0x6]; u8 init2_lag_tx_port_affinity[0x1]; - u8 reserved_at_1fa[0x3]; + u8 reserved_at_1fa[0x2]; + u8 wqe_based_flow_table_update_cap[0x1]; u8 cqe_version[0x4]; u8 compact_address_vector[0x1]; @@ -1764,7 +1872,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_328[0x2]; u8 relaxed_ordering_read[0x1]; u8 log_max_pd[0x5]; - u8 reserved_at_330[0x6]; + u8 reserved_at_330[0x5]; + u8 pcie_reset_using_hotreset_method[0x1]; u8 pci_sync_for_fw_update_with_driver_unload[0x1]; u8 vnic_env_cnt_steering_fail[0x1]; u8 vport_counter_local_loopback[0x1]; @@ -1885,7 +1994,8 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_5a0[0x10]; u8 enhanced_cqe_compression[0x1]; - u8 reserved_at_5b1[0x2]; + u8 reserved_at_5b1[0x1]; + u8 crossing_vhca_mkey[0x1]; u8 log_max_dek[0x5]; u8 reserved_at_5b8[0x4]; u8 mini_cqe_resp_stride_index[0x1]; @@ -1954,12 +2064,14 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 dynamic_msix_table_size[0xc]; u8 reserved_at_740[0xc]; u8 min_dynamic_vf_msix_table_size[0x4]; - u8 reserved_at_750[0x4]; + u8 reserved_at_750[0x2]; + u8 data_direct[0x1]; + u8 reserved_at_753[0x1]; u8 max_dynamic_vf_msix_table_size[0xc]; u8 reserved_at_760[0x3]; u8 log_max_num_header_modify_argument[0x5]; - u8 reserved_at_768[0x4]; + u8 log_header_modify_argument_granularity_offset[0x4]; u8 log_header_modify_argument_granularity[0x4]; u8 reserved_at_770[0x3]; u8 log_header_modify_argument_max_alloc[0x5]; @@ -1982,7 +2094,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_0[0x80]; u8 migratable[0x1]; - u8 reserved_at_81[0x1f]; + u8 reserved_at_81[0x11]; + u8 query_vuid[0x1]; + u8 reserved_at_93[0x5]; + u8 
umr_log_entity_size_5[0x1]; + u8 reserved_at_99[0x7]; u8 max_reformat_insert_size[0x8]; u8 max_reformat_insert_offset[0x8]; @@ -2006,7 +2122,8 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_140[0x60]; u8 flow_table_type_2_type[0x8]; - u8 reserved_at_1a8[0x3]; + u8 reserved_at_1a8[0x2]; + u8 format_select_dw_8_6_ext[0x1]; u8 log_min_mkey_entity_size[0x5]; u8 reserved_at_1b0[0x10]; @@ -2022,11 +2139,22 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_250[0x10]; u8 reserved_at_260[0x120]; + + u8 format_select_dw_gtpu_dw_0[0x8]; + u8 format_select_dw_gtpu_dw_1[0x8]; + u8 format_select_dw_gtpu_dw_2[0x8]; + u8 format_select_dw_gtpu_first_ext_dw_0[0x8]; + + u8 generate_wqe_type[0x20]; + + u8 reserved_at_2c0[0xc0]; + u8 reserved_at_380[0xb]; u8 min_mkey_log_entity_size_fixed_buffer[0x5]; u8 ec_vf_vport_base[0x10]; - u8 reserved_at_3a0[0x10]; + u8 reserved_at_3a0[0xa]; + u8 max_mkey_log_entity_size_mtt[0x6]; u8 max_rqt_vhca_id[0x10]; u8 reserved_at_3c0[0x20]; @@ -2037,9 +2165,11 @@ struct mlx5_ifc_cmd_hca_cap_2_bits { u8 reserved_at_400[0x1]; u8 min_mkey_log_entity_size_fixed_buffer_valid[0x1]; - u8 reserved_at_402[0x1e]; + u8 reserved_at_402[0xe]; + u8 return_reg_id[0x10]; - u8 reserved_at_420[0x20]; + u8 reserved_at_420[0x1c]; + u8 flow_table_hash_type[0x4]; u8 reserved_at_440[0x8]; u8 max_num_eqs_24b[0x18]; @@ -2086,7 +2216,7 @@ struct mlx5_ifc_extended_dest_format_bits { u8 reserved_at_60[0x20]; }; -union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits { +union mlx5_ifc_dest_format_flow_counter_list_auto_bits { struct mlx5_ifc_extended_dest_format_bits extended_dest_format; struct mlx5_ifc_flow_counter_list_bits flow_counter_list; }; @@ -2178,7 +2308,10 @@ struct mlx5_ifc_wq_bits { u8 reserved_at_139[0x4]; u8 log_wqe_stride_size[0x3]; - u8 reserved_at_140[0x80]; + u8 dbr_umem_id[0x20]; + u8 wq_umem_id[0x20]; + + u8 wq_umem_offset[0x40]; u8 headers_mkey[0x20]; @@ -3562,6 +3695,8 @@ union mlx5_ifc_hca_cap_union_bits { struct mlx5_ifc_per_protocol_networking_offload_caps_bits per_protocol_networking_offload_caps; struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap; struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap; + struct mlx5_ifc_wqe_based_flow_table_cap_bits wqe_based_flow_table_cap; + struct mlx5_ifc_esw_cap_bits esw_cap; struct mlx5_ifc_e_switch_cap_bits e_switch_cap; struct mlx5_ifc_port_selection_cap_bits port_selection_cap; struct mlx5_ifc_qos_cap_bits qos_cap; @@ -3678,7 +3813,7 @@ struct mlx5_ifc_flow_context_bits { u8 reserved_at_1300[0x500]; - union mlx5_ifc_dest_format_struct_flow_counter_list_auto_bits destination[]; + union mlx5_ifc_dest_format_flow_counter_list_auto_bits destination[]; }; enum { @@ -3919,7 +4054,8 @@ struct mlx5_ifc_sqc_bits { u8 reg_umr[0x1]; u8 allow_swp[0x1]; u8 hairpin[0x1]; - u8 reserved_at_f[0xb]; + u8 non_wire[0x1]; + u8 reserved_at_10[0xa]; u8 ts_format[0x2]; u8 reserved_at_1c[0x4]; @@ -3966,6 +4102,7 @@ enum { ELEMENT_TYPE_CAP_MASK_VPORT = 1 << 1, ELEMENT_TYPE_CAP_MASK_VPORT_TC = 1 << 2, ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC = 1 << 3, + ELEMENT_TYPE_CAP_MASK_QUEUE_GROUP = 1 << 4, }; struct mlx5_ifc_scheduling_context_bits { @@ -4154,6 +4291,7 @@ enum { MLX5_MKC_ACCESS_MODE_KSM = 0x3, MLX5_MKC_ACCESS_MODE_SW_ICM = 0x4, MLX5_MKC_ACCESS_MODE_MEMIC = 0x5, + MLX5_MKC_ACCESS_MODE_CROSSING = 0x6, }; struct mlx5_ifc_mkc_bits { @@ -4196,14 +4334,16 @@ struct mlx5_ifc_mkc_bits { u8 bsf_octword_size[0x20]; - u8 reserved_at_120[0x80]; + u8 reserved_at_120[0x60]; + + u8 crossing_target_vhca_id[0x10]; + u8 
reserved_at_190[0x10]; u8 translations_octword_size[0x20]; u8 reserved_at_1c0[0x19]; u8 relaxed_ordering_read[0x1]; - u8 reserved_at_1d9[0x1]; - u8 log_page_size[0x5]; + u8 log_page_size[0x6]; u8 reserved_at_1e0[0x20]; }; @@ -4675,6 +4815,12 @@ enum { TSAR_ELEMENT_TSAR_TYPE_ETS = 0x2, }; +enum { + TSAR_TYPE_CAP_MASK_DWRR = 1 << 0, + TSAR_TYPE_CAP_MASK_ROUND_ROBIN = 1 << 1, + TSAR_TYPE_CAP_MASK_ETS = 1 << 2, +}; + struct mlx5_ifc_tsar_element_bits { u8 reserved_at_0[0x8]; u8 tsar_type[0x8]; @@ -4961,6 +5107,16 @@ struct mlx5_ifc_set_fte_in_bits { struct mlx5_ifc_flow_context_bits flow_context; }; +struct mlx5_ifc_dest_format_bits { + u8 destination_type[0x8]; + u8 destination_id[0x18]; + + u8 destination_eswitch_owner_vhca_id_valid[0x1]; + u8 packet_reformat[0x1]; + u8 reserved_at_22[0xe]; + u8 destination_eswitch_owner_vhca_id[0x10]; +}; + struct mlx5_ifc_rts2rts_qp_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -5124,6 +5280,36 @@ struct mlx5_ifc_query_vport_state_out_bits { u8 state[0x4]; }; +struct mlx5_ifc_array1024_auto_bits { + u8 array1024_auto[32][0x20]; +}; + +struct mlx5_ifc_query_vuid_in_bits { + u8 opcode[0x10]; + u8 uid[0x10]; + + u8 reserved_at_20[0x40]; + + u8 query_vfs_vuid[0x1]; + u8 data_direct[0x1]; + u8 reserved_at_62[0xe]; + u8 vhca_id[0x10]; +}; + +struct mlx5_ifc_query_vuid_out_bits { + u8 status[0x8]; + u8 reserved_at_8[0x18]; + + u8 syndrome[0x20]; + + u8 reserved_at_40[0x1a0]; + + u8 reserved_at_1e0[0x10]; + u8 num_of_entries[0x10]; + + struct mlx5_ifc_array1024_auto_bits vuid[]; +}; + enum { MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0, MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1, @@ -6127,7 +6313,8 @@ struct mlx5_ifc_flow_table_context_bits { u8 termination_table[0x1]; u8 table_miss_action[0x4]; u8 level[0x8]; - u8 reserved_at_10[0x8]; + u8 rtc_valid[0x1]; + u8 reserved_at_11[0x7]; u8 log_size[0x8]; u8 reserved_at_20[0x8]; @@ -6137,11 +6324,21 @@ struct mlx5_ifc_flow_table_context_bits { u8 lag_master_next_table_id[0x18]; u8 reserved_at_60[0x60]; + union { + struct { + u8 sw_owner_icm_root_1[0x40]; + + u8 sw_owner_icm_root_0[0x40]; + } sws; + struct { + u8 rtc_id_0[0x20]; - u8 sw_owner_icm_root_1[0x40]; + u8 rtc_id_1[0x20]; - u8 sw_owner_icm_root_0[0x40]; + u8 reserved_at_100[0x40]; + } hws; + }; }; struct mlx5_ifc_query_flow_table_out_bits { @@ -7217,6 +7414,30 @@ struct mlx5_ifc_qp_2err_in_bits { u8 reserved_at_60[0x20]; }; +struct mlx5_ifc_trans_page_fault_info_bits { + u8 error[0x1]; + u8 reserved_at_1[0x4]; + u8 page_fault_type[0x3]; + u8 wq_number[0x18]; + + u8 reserved_at_20[0x8]; + u8 fault_token[0x18]; +}; + +struct mlx5_ifc_mem_page_fault_info_bits { + u8 error[0x1]; + u8 reserved_at_1[0xf]; + u8 fault_token_47_32[0x10]; + + u8 fault_token_31_0[0x20]; +}; + +union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits { + struct mlx5_ifc_trans_page_fault_info_bits trans_page_fault_info; + struct mlx5_ifc_mem_page_fault_info_bits mem_page_fault_info; + u8 reserved_at_0[0x40]; +}; + struct mlx5_ifc_page_fault_resume_out_bits { u8 status[0x8]; u8 reserved_at_8[0x18]; @@ -7233,13 +7454,8 @@ struct mlx5_ifc_page_fault_resume_in_bits { u8 reserved_at_20[0x10]; u8 op_mod[0x10]; - u8 error[0x1]; - u8 reserved_at_41[0x4]; - u8 page_fault_type[0x3]; - u8 wq_number[0x18]; - - u8 reserved_at_60[0x8]; - u8 token[0x18]; + union mlx5_ifc_page_fault_resume_in_page_fault_info_auto_bits + page_fault_info; }; struct mlx5_ifc_nop_out_bits { @@ -8923,7 +9139,9 @@ struct mlx5_ifc_create_qp_in_bits { struct mlx5_ifc_qpc_bits qpc; - u8 reserved_at_800[0x60]; + u8 
wq_umem_offset[0x40]; + + u8 wq_umem_id[0x20]; u8 wq_umem_valid[0x1]; u8 reserved_at_861[0x1f]; @@ -8989,7 +9207,8 @@ struct mlx5_ifc_create_mkey_in_bits { u8 pg_access[0x1]; u8 mkey_umem_valid[0x1]; - u8 reserved_at_62[0x1e]; + u8 data_direct[0x1]; + u8 reserved_at_63[0x1d]; struct mlx5_ifc_mkc_bits memory_key_mkey_entry; @@ -10401,6 +10620,18 @@ struct mlx5_ifc_mcam_access_reg_bits2 { u8 regs_31_to_0[0x20]; }; +struct mlx5_ifc_mcam_access_reg_bits3 { + u8 regs_127_to_96[0x20]; + + u8 regs_95_to_64[0x20]; + + u8 regs_63_to_32[0x20]; + + u8 regs_31_to_2[0x1e]; + u8 mtctr[0x1]; + u8 mtptm[0x1]; +}; + struct mlx5_ifc_mcam_reg_bits { u8 reserved_at_0[0x8]; u8 feature_group[0x8]; @@ -10413,6 +10644,7 @@ struct mlx5_ifc_mcam_reg_bits { struct mlx5_ifc_mcam_access_reg_bits access_regs; struct mlx5_ifc_mcam_access_reg_bits1 access_regs1; struct mlx5_ifc_mcam_access_reg_bits2 access_regs2; + struct mlx5_ifc_mcam_access_reg_bits3 access_regs3; u8 reserved_at_0[0x80]; } mng_access_reg_cap_mask; @@ -11035,6 +11267,11 @@ struct mlx5_ifc_mcda_reg_bits { }; enum { + MLX5_MFRL_REG_PCI_RESET_METHOD_LINK_TOGGLE = 0, + MLX5_MFRL_REG_PCI_RESET_METHOD_HOT_RESET = 1, +}; + +enum { MLX5_MFRL_REG_RESET_STATE_IDLE = 0, MLX5_MFRL_REG_RESET_STATE_IN_NEGOTIATION = 1, MLX5_MFRL_REG_RESET_STATE_RESET_IN_PROGRESS = 2, @@ -11061,7 +11298,8 @@ struct mlx5_ifc_mfrl_reg_bits { u8 pci_sync_for_fw_update_start[0x1]; u8 pci_sync_for_fw_update_resp[0x2]; u8 rst_type_sel[0x3]; - u8 reserved_at_28[0x4]; + u8 pci_reset_req_method[0x3]; + u8 reserved_at_2b[0x1]; u8 reset_state[0x4]; u8 reset_type[0x8]; u8 reset_level[0x8]; @@ -11166,6 +11404,34 @@ struct mlx5_ifc_mtmp_reg_bits { u8 sensor_name_lo[0x20]; }; +struct mlx5_ifc_mtptm_reg_bits { + u8 reserved_at_0[0x10]; + u8 psta[0x1]; + u8 reserved_at_11[0xf]; + + u8 reserved_at_20[0x60]; +}; + +enum { + MLX5_MTCTR_REQUEST_NOP = 0x0, + MLX5_MTCTR_REQUEST_PTM_ROOT_CLOCK = 0x1, + MLX5_MTCTR_REQUEST_FREE_RUNNING_COUNTER = 0x2, + MLX5_MTCTR_REQUEST_REAL_TIME_CLOCK = 0x3, +}; + +struct mlx5_ifc_mtctr_reg_bits { + u8 first_clock_timestamp_request[0x8]; + u8 second_clock_timestamp_request[0x8]; + u8 reserved_at_10[0x10]; + + u8 first_clock_valid[0x1]; + u8 second_clock_valid[0x1]; + u8 reserved_at_22[0x1e]; + + u8 first_clock_timestamp[0x40]; + u8 second_clock_timestamp[0x40]; +}; + union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_bufferx_reg_bits bufferx_reg; struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout; @@ -11230,6 +11496,8 @@ union mlx5_ifc_ports_control_registers_document_bits { struct mlx5_ifc_mrtc_reg_bits mrtc_reg; struct mlx5_ifc_mtcap_reg_bits mtcap_reg; struct mlx5_ifc_mtmp_reg_bits mtmp_reg; + struct mlx5_ifc_mtptm_reg_bits mtptm_reg; + struct mlx5_ifc_mtctr_reg_bits mtctr_reg; u8 reserved_at_0[0x60e0]; }; diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index ad1ce650146c..fc7eeff99a8a 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h @@ -149,6 +149,7 @@ enum { MLX5_WQE_CTRL_CQ_UPDATE = 2 << 2, MLX5_WQE_CTRL_CQ_UPDATE_AND_EQE = 3 << 2, MLX5_WQE_CTRL_SOLICITED = 1 << 1, + MLX5_WQE_CTRL_INITIATOR_SMALL_FENCE = 1 << 5, }; enum { diff --git a/include/linux/mm.h b/include/linux/mm.h index 147073601716..ecf63d2b0582 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -98,7 +98,11 @@ extern int mmap_rnd_compat_bits __read_mostly; #endif #ifndef PHYSMEM_END +# ifdef MAX_PHYSMEM_BITS # define PHYSMEM_END ((1ULL << MAX_PHYSMEM_BITS) - 1) +# else +# define PHYSMEM_END 
(((phys_addr_t)-1)&~(1ULL<<63)) +# endif #endif #include <asm/page.h> @@ -334,12 +338,16 @@ extern unsigned int kobjsize(const void *objp); #endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */ #ifdef CONFIG_ARCH_HAS_PKEYS -# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 -# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 /* A protection key is a 4-bit value */ -# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 /* on x86 and 5-bit value on ppc64 */ -# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 -# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 -#ifdef CONFIG_PPC +# define VM_PKEY_SHIFT VM_HIGH_ARCH_BIT_0 +# define VM_PKEY_BIT0 VM_HIGH_ARCH_0 +# define VM_PKEY_BIT1 VM_HIGH_ARCH_1 +# define VM_PKEY_BIT2 VM_HIGH_ARCH_2 +#if CONFIG_ARCH_PKEY_BITS > 3 +# define VM_PKEY_BIT3 VM_HIGH_ARCH_3 +#else +# define VM_PKEY_BIT3 0 +#endif +#if CONFIG_ARCH_PKEY_BITS > 4 # define VM_PKEY_BIT4 VM_HIGH_ARCH_4 #else # define VM_PKEY_BIT4 0 @@ -363,7 +371,7 @@ extern unsigned int kobjsize(const void *objp); #if defined(CONFIG_X86) # define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */ -#elif defined(CONFIG_PPC) +#elif defined(CONFIG_PPC64) # define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */ #elif defined(CONFIG_PARISC) # define VM_GROWSUP VM_ARCH_1 @@ -378,8 +386,8 @@ extern unsigned int kobjsize(const void *objp); #endif #if defined(CONFIG_ARM64_MTE) -# define VM_MTE VM_HIGH_ARCH_0 /* Use Tagged memory for access control */ -# define VM_MTE_ALLOWED VM_HIGH_ARCH_1 /* Tagged memory permitted */ +# define VM_MTE VM_HIGH_ARCH_4 /* Use Tagged memory for access control */ +# define VM_MTE_ALLOWED VM_HIGH_ARCH_5 /* Tagged memory permitted */ #else # define VM_MTE VM_NONE # define VM_MTE_ALLOWED VM_NONE @@ -413,6 +421,8 @@ extern unsigned int kobjsize(const void *objp); #ifdef CONFIG_64BIT #define VM_DROPPABLE_BIT 40 #define VM_DROPPABLE BIT(VM_DROPPABLE_BIT) +#elif defined(CONFIG_PPC32) +#define VM_DROPPABLE VM_ARCH_1 #else #define VM_DROPPABLE VM_NONE #endif @@ -1009,27 +1019,6 @@ static inline struct vm_area_struct *vma_prev(struct vma_iterator *vmi) return mas_prev(&vmi->mas, 0); } -static inline -struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi) -{ - return mas_prev_range(&vmi->mas, 0); -} - -static inline unsigned long vma_iter_addr(struct vma_iterator *vmi) -{ - return vmi->mas.index; -} - -static inline unsigned long vma_iter_end(struct vma_iterator *vmi) -{ - return vmi->mas.last + 1; -} -static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi, - unsigned long count) -{ - return mas_expected_entries(&vmi->mas, count); -} - static inline int vma_iter_clear_gfp(struct vma_iterator *vmi, unsigned long start, unsigned long end, gfp_t gfp) { @@ -1253,8 +1242,7 @@ static inline int folio_mapcount(const struct folio *folio) if (likely(!folio_test_large(folio))) { mapcount = atomic_read(&folio->_mapcount) + 1; - /* Handle page_has_type() pages */ - if (mapcount < PAGE_MAPCOUNT_RESERVE + 1) + if (page_mapcount_is_type(mapcount)) mapcount = 0; return mapcount; } @@ -1601,6 +1589,7 @@ void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages, void unpin_user_page_range_dirty_lock(struct page *page, unsigned long npages, bool make_dirty); void unpin_user_pages(struct page **pages, unsigned long npages); +void unpin_user_folio(struct folio *folio, unsigned long npages); void unpin_folios(struct folio **folios, unsigned long nfolios); static inline bool is_cow_mapping(vm_flags_t flags) @@ -1749,6 +1738,8 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) __set_bit(pid_bit, 
&vma->numab_state->pids_active[1]); } } + +bool folio_use_access_time(struct folio *folio); #else /* !CONFIG_NUMA_BALANCING */ static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid) { @@ -1802,6 +1793,10 @@ static inline bool cpupid_match_pid(struct task_struct *task, int cpupid) static inline void vma_set_access_pid_bit(struct vm_area_struct *vma) { } +static inline bool folio_use_access_time(struct folio *folio) +{ + return false; +} #endif /* CONFIG_NUMA_BALANCING */ #if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS) @@ -2151,14 +2146,19 @@ static inline size_t folio_size(const struct folio *folio) * MM ("mapped shared"), or if the folio is only mapped into a single MM * ("mapped exclusively"). * + * For KSM folios, this function also returns "mapped shared" when a folio is + * mapped multiple times into the same MM, because the individual page mappings + * are independent. + * * As precise information is not easily available for all folios, this function * estimates the number of MMs ("sharers") that are currently mapping a folio * using the number of times the first page of the folio is currently mapped * into page tables. * - * For small anonymous folios (except KSM folios) and anonymous hugetlb folios, - * the return value will be exactly correct, because they can only be mapped - * at most once into an MM, and they cannot be partially mapped. + * For small anonymous folios and anonymous hugetlb folios, the return + * value will be exactly correct: non-KSM folios can only be mapped at most once + * into an MM, and they cannot be partially mapped. KSM folios are + * considered shared even if mapped multiple times into the same MM. * * For other folios, the result can be fuzzy: * #. For partially-mappable large folios (THP), the return value can wrongly @@ -2167,9 +2167,6 @@ static inline size_t folio_size(const struct folio *folio) * #. For pagecache folios (including hugetlb), the return value can wrongly * indicate "mapped shared" (false positive) when two VMAs in the same MM * cover the same file range. - * #. For (small) KSM folios, the return value can wrongly indicate "mapped - * shared" (false positive), when the folio is mapped multiple times into - * the same MM. * * Further, this function only considers current page table mappings that * are tracked using the folio mapcount(s). 
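The hunks above tighten the documented semantics of folio_likely_mapped_shared() for KSM folios and add a folio_use_access_time() stub for !CONFIG_NUMA_BALANCING builds. As a rough caller-side illustration of how the two combine (a hypothetical helper, not code from this series):

```c
#include <linux/mm.h>

/*
 * Hypothetical sketch: only consult the access-time heuristic for folios
 * that look exclusively mapped. folio_use_access_time() now has a stub
 * returning false when CONFIG_NUMA_BALANCING is off, so no #ifdef is
 * needed at the call site.
 */
static bool my_folio_wants_access_time(struct folio *folio)
{
	/* KSM folios mapped several times into one MM now count as shared */
	if (folio_likely_mapped_shared(folio))
		return false;

	return folio_use_access_time(folio);
}
```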
@@ -2203,26 +2200,10 @@ static inline bool folio_likely_mapped_shared(struct folio *folio) return atomic_read(&folio->_mapcount) > 0; } -#ifndef HAVE_ARCH_MAKE_PAGE_ACCESSIBLE -static inline int arch_make_page_accessible(struct page *page) -{ - return 0; -} -#endif - #ifndef HAVE_ARCH_MAKE_FOLIO_ACCESSIBLE static inline int arch_make_folio_accessible(struct folio *folio) { - int ret; - long i, nr = folio_nr_pages(folio); - - for (i = 0; i < nr; i++) { - ret = arch_make_page_accessible(folio_page(folio, i)); - if (ret) - break; - } - - return ret; + return 0; } #endif @@ -2402,11 +2383,40 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma); -int follow_pte(struct vm_area_struct *vma, unsigned long address, - pte_t **ptepp, spinlock_t **ptlp); int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, void *buf, int len, int write); +struct follow_pfnmap_args { + /** + * Inputs: + * @vma: Pointer to @vm_area_struct struct + * @address: the virtual address to walk + */ + struct vm_area_struct *vma; + unsigned long address; + /** + * Internals: + * + * The caller shouldn't touch any of these. + */ + spinlock_t *lock; + pte_t *ptep; + /** + * Outputs: + * + * @pfn: the PFN of the address + * @pgprot: the pgprot_t of the mapping + * @writable: whether the mapping is writable + * @special: whether the mapping is a special mapping (real PFN maps) + */ + unsigned long pfn; + pgprot_t pgprot; + bool writable; + bool special; +}; +int follow_pfnmap_start(struct follow_pfnmap_args *args); +void follow_pfnmap_end(struct follow_pfnmap_args *args); + extern void truncate_pagecache(struct inode *inode, loff_t new); extern void truncate_setsize(struct inode *inode, loff_t newsize); void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); @@ -2534,11 +2544,6 @@ int set_page_dirty_lock(struct page *page); int get_cmdline(struct task_struct *task, char *buffer, int buflen); -extern unsigned long move_page_tables(struct vm_area_struct *vma, - unsigned long old_addr, struct vm_area_struct *new_vma, - unsigned long new_addr, unsigned long len, - bool need_rmap_locks, bool for_stack); - /* * Flags used by change_protection(). For now we make it a bitmap so * that we can pass in multiple flags just like parameters. However @@ -2559,21 +2564,6 @@ extern unsigned long move_page_tables(struct vm_area_struct *vma, #define MM_CP_UFFD_WP_ALL (MM_CP_UFFD_WP | \ MM_CP_UFFD_WP_RESOLVE) -bool vma_needs_dirty_tracking(struct vm_area_struct *vma); -bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot); -static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma) -{ - /* - * We want to check manually if we can change individual PTEs writable - * if we can't do that automatically for all PTEs in a mapping. For - * private mappings, that's always the case when we have write - * permissions as we properly have to handle COW. 
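The follow_pte() declaration is dropped above in favour of the follow_pfnmap_start()/follow_pfnmap_end() pair and its argument structure. A minimal usage sketch, assuming the caller has already resolved the VMA and holds the required mmap lock (error handling trimmed, names illustrative):

```c
#include <linux/mm.h>

/* Sketch: translate a user virtual address in a PFN-mapped VMA to a PFN. */
static int my_lookup_pfn(struct vm_area_struct *vma, unsigned long addr,
			 unsigned long *pfn, bool *writable)
{
	struct follow_pfnmap_args args = {
		.vma = vma,
		.address = addr,
	};
	int ret;

	ret = follow_pfnmap_start(&args);	/* fills in the output fields */
	if (ret)
		return ret;

	*pfn = args.pfn;
	*writable = args.writable;

	follow_pfnmap_end(&args);		/* releases internal state/locks */
	return 0;
}
```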
- */ - if (vma->vm_flags & VM_SHARED) - return vma_wants_writenotify(vma, vma->vm_page_prot); - return !!(vma->vm_flags & VM_WRITE); - -} bool can_change_pte_writable(struct vm_area_struct *vma, unsigned long addr, pte_t pte); extern long change_protection(struct mmu_gather *tlb, @@ -2697,6 +2687,30 @@ static inline pte_t pte_mkspecial(pte_t pte) } #endif +#ifndef CONFIG_ARCH_SUPPORTS_PMD_PFNMAP +static inline bool pmd_special(pmd_t pmd) +{ + return false; +} + +static inline pmd_t pmd_mkspecial(pmd_t pmd) +{ + return pmd; +} +#endif /* CONFIG_ARCH_SUPPORTS_PMD_PFNMAP */ + +#ifndef CONFIG_ARCH_SUPPORTS_PUD_PFNMAP +static inline bool pud_special(pud_t pud) +{ + return false; +} + +static inline pud_t pud_mkspecial(pud_t pud) +{ + return pud; +} +#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */ + #ifndef CONFIG_ARCH_HAS_PTE_DEVMAP static inline int pte_devmap(pte_t pte) { @@ -2889,7 +2903,7 @@ static inline void pagetable_free(struct ptdesc *pt) __free_pages(page, compound_order(page)); } -#if USE_SPLIT_PTE_PTLOCKS +#if defined(CONFIG_SPLIT_PTE_PTLOCKS) #if ALLOC_SPLIT_PTLOCKS void __init ptlock_cache_init(void); bool ptlock_alloc(struct ptdesc *ptdesc); @@ -2947,7 +2961,7 @@ static inline bool ptlock_init(struct ptdesc *ptdesc) return true; } -#else /* !USE_SPLIT_PTE_PTLOCKS */ +#else /* !defined(CONFIG_SPLIT_PTE_PTLOCKS) */ /* * We use mm->page_table_lock to guard all pagetable pages of the mm. */ @@ -2962,7 +2976,7 @@ static inline spinlock_t *ptep_lockptr(struct mm_struct *mm, pte_t *pte) static inline void ptlock_cache_init(void) {} static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; } static inline void ptlock_free(struct ptdesc *ptdesc) {} -#endif /* USE_SPLIT_PTE_PTLOCKS */ +#endif /* defined(CONFIG_SPLIT_PTE_PTLOCKS) */ static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc) { @@ -3022,7 +3036,7 @@ pte_t *pte_offset_map_nolock(struct mm_struct *mm, pmd_t *pmd, ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd))? \ NULL: pte_offset_kernel(pmd, address)) -#if USE_SPLIT_PMD_PTLOCKS +#if defined(CONFIG_SPLIT_PMD_PTLOCKS) static inline struct page *pmd_pgtable_page(pmd_t *pmd) { @@ -3281,78 +3295,9 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node); /* mmap.c */ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin); -extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long start, unsigned long end, pgoff_t pgoff, - struct vm_area_struct *next); -extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long start, unsigned long end, pgoff_t pgoff); -extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *); extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *); -extern void unlink_file_vma(struct vm_area_struct *); -extern struct vm_area_struct *copy_vma(struct vm_area_struct **, - unsigned long addr, unsigned long len, pgoff_t pgoff, - bool *need_rmap_locks); extern void exit_mmap(struct mm_struct *); -struct vm_area_struct *vma_modify(struct vma_iterator *vmi, - struct vm_area_struct *prev, - struct vm_area_struct *vma, - unsigned long start, unsigned long end, - unsigned long vm_flags, - struct mempolicy *policy, - struct vm_userfaultfd_ctx uffd_ctx, - struct anon_vma_name *anon_name); - -/* We are about to modify the VMA's flags. 
*/ -static inline struct vm_area_struct -*vma_modify_flags(struct vma_iterator *vmi, - struct vm_area_struct *prev, - struct vm_area_struct *vma, - unsigned long start, unsigned long end, - unsigned long new_flags) -{ - return vma_modify(vmi, prev, vma, start, end, new_flags, - vma_policy(vma), vma->vm_userfaultfd_ctx, - anon_vma_name(vma)); -} - -/* We are about to modify the VMA's flags and/or anon_name. */ -static inline struct vm_area_struct -*vma_modify_flags_name(struct vma_iterator *vmi, - struct vm_area_struct *prev, - struct vm_area_struct *vma, - unsigned long start, - unsigned long end, - unsigned long new_flags, - struct anon_vma_name *new_name) -{ - return vma_modify(vmi, prev, vma, start, end, new_flags, - vma_policy(vma), vma->vm_userfaultfd_ctx, new_name); -} - -/* We are about to modify the VMA's memory policy. */ -static inline struct vm_area_struct -*vma_modify_policy(struct vma_iterator *vmi, - struct vm_area_struct *prev, - struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct mempolicy *new_pol) -{ - return vma_modify(vmi, prev, vma, start, end, vma->vm_flags, - new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma)); -} - -/* We are about to modify the VMA's flags and/or uffd context. */ -static inline struct vm_area_struct -*vma_modify_flags_uffd(struct vma_iterator *vmi, - struct vm_area_struct *prev, - struct vm_area_struct *vma, - unsigned long start, unsigned long end, - unsigned long new_flags, - struct vm_userfaultfd_ctx new_ctx) -{ - return vma_modify(vmi, prev, vma, start, end, new_flags, - vma_policy(vma), new_ctx, anon_vma_name(vma)); -} +int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift); static inline int check_data_rlimit(unsigned long rlim, unsigned long new, @@ -3385,10 +3330,6 @@ extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, unsigned long flags, const struct vm_special_mapping *spec); -/* This is an obsolete alternative to _install_special_mapping. 
*/ -extern int install_special_mapping(struct mm_struct *mm, - unsigned long addr, unsigned long len, - unsigned long flags, struct page **pages); unsigned long randomize_stack_top(unsigned long stack_top); unsigned long randomize_page(unsigned long start, unsigned long range); @@ -3414,14 +3355,14 @@ extern unsigned long do_mmap(struct file *file, unsigned long addr, extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf, bool unlock); +int do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, + struct mm_struct *mm, unsigned long start, + unsigned long end, struct list_head *uf, bool unlock); extern int do_munmap(struct mm_struct *, unsigned long, size_t, struct list_head *uf); extern int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior); #ifdef CONFIG_MMU -extern int do_vma_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma, - unsigned long start, unsigned long end, - struct list_head *uf, bool unlock); extern int __mm_populate(unsigned long addr, unsigned long len, int ignore_errors); static inline void mm_populate(unsigned long addr, unsigned long len) @@ -3649,9 +3590,6 @@ static inline vm_fault_t vmf_fs_error(int err) return VM_FAULT_SIGBUS; } -struct page *follow_page(struct vm_area_struct *vma, unsigned long address, - unsigned int foll_flags); - static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags) { if (vm_fault & VM_FAULT_OOM) @@ -4187,18 +4125,18 @@ madvise_set_anon_name(struct mm_struct *mm, unsigned long start, #ifdef CONFIG_UNACCEPTED_MEMORY -bool range_contains_unaccepted_memory(phys_addr_t start, phys_addr_t end); -void accept_memory(phys_addr_t start, phys_addr_t end); +bool range_contains_unaccepted_memory(phys_addr_t start, unsigned long size); +void accept_memory(phys_addr_t start, unsigned long size); #else static inline bool range_contains_unaccepted_memory(phys_addr_t start, - phys_addr_t end) + unsigned long size) { return false; } -static inline void accept_memory(phys_addr_t start, phys_addr_t end) +static inline void accept_memory(phys_addr_t start, unsigned long size) { } @@ -4206,9 +4144,7 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end) static inline bool pfn_is_unaccepted_memory(unsigned long pfn) { - phys_addr_t paddr = pfn << PAGE_SHIFT; - - return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE); + return range_contains_unaccepted_memory(pfn << PAGE_SHIFT, PAGE_SIZE); } void vma_pgtable_walk_begin(struct vm_area_struct *vma); @@ -4216,4 +4152,71 @@ void vma_pgtable_walk_end(struct vm_area_struct *vma); int reserve_mem_find_by_name(const char *name, phys_addr_t *start, phys_addr_t *size); +#ifdef CONFIG_64BIT +int do_mseal(unsigned long start, size_t len_in, unsigned long flags); +#else +static inline int do_mseal(unsigned long start, size_t len_in, unsigned long flags) +{ + /* noop on 32 bit */ + return 0; +} +#endif + +#ifdef CONFIG_MEM_ALLOC_PROFILING +static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) +{ + int i; + struct alloc_tag *tag; + unsigned int nr_pages = 1 << new_order; + + if (!mem_alloc_profiling_enabled()) + return; + + tag = pgalloc_tag_get(&folio->page); + if (!tag) + return; + + for (i = nr_pages; i < (1 << old_order); i += nr_pages) { + union codetag_ref *ref = get_page_tag_ref(folio_page(folio, i)); + + if (ref) { + /* Set new reference to point to the original tag */ + alloc_tag_ref_set(ref, tag); + 
put_page_tag_ref(ref); + } + } +} + +static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) +{ + struct alloc_tag *tag; + union codetag_ref *ref; + + tag = pgalloc_tag_get(&old->page); + if (!tag) + return; + + ref = get_page_tag_ref(&new->page); + if (!ref) + return; + + /* Clear the old ref to the original allocation tag. */ + clear_page_tag_ref(&old->page); + /* Decrement the counters of the tag on get_new_folio. */ + alloc_tag_sub(ref, folio_nr_pages(new)); + + __alloc_tag_ref_set(ref, tag); + + put_page_tag_ref(ref); +} +#else /* !CONFIG_MEM_ALLOC_PROFILING */ +static inline void pgalloc_tag_split(struct folio *folio, int old_order, int new_order) +{ +} + +static inline void pgalloc_tag_copy(struct folio *new, struct folio *old) +{ +} +#endif /* CONFIG_MEM_ALLOC_PROFILING */ + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 485424979254..6e3bdf8e38bc 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -109,7 +109,7 @@ struct page { /** * @private: Mapping-private opaque data. * Usually used for buffer_heads if PagePrivate. - * Used for swp_entry_t if PageSwapCache. + * Used for swp_entry_t if swapcache flag set. * Indicates order in the buddy system if PageBuddy. */ unsigned long private; @@ -660,6 +660,9 @@ struct vma_numab_state { * per VM-area/task. A VM area is any part of the process virtual memory * space that has a special rule for the page-fault handlers (ie a shared * library, the executable area etc). + * + * Only explicitly marked struct members may be accessed by RCU readers before + * getting a stable reference. */ struct vm_area_struct { /* The first cache line has the info for VMA tree walking. */ @@ -675,7 +678,11 @@ struct vm_area_struct { #endif }; - struct mm_struct *vm_mm; /* The address space we belong to. */ + /* + * The address space we belong to. + * Unstable RCU readers are allowed to read this. + */ + struct mm_struct *vm_mm; pgprot_t vm_page_prot; /* Access permissions of this VMA. */ /* @@ -688,7 +695,10 @@ struct vm_area_struct { }; #ifdef CONFIG_PER_VMA_LOCK - /* Flag to indicate areas detached from the mm->mm_mt tree */ + /* + * Flag to indicate areas detached from the mm->mm_mt tree. + * Unstable RCU readers are allowed to read this. + */ bool detached; /* @@ -706,6 +716,7 @@ struct vm_area_struct { * slowpath. */ int vm_lock_seq; + /* Unstable RCU readers are allowed to read this. 
*/ struct vma_lock *vm_lock; #endif @@ -947,7 +958,7 @@ struct mm_struct { #ifdef CONFIG_MMU_NOTIFIER struct mmu_notifier_subscriptions *notifier_subscriptions; #endif -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS +#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !defined(CONFIG_SPLIT_PMD_PTLOCKS) pgtable_t pmd_huge_pte; /* protected by page_table_lock */ #endif #ifdef CONFIG_NUMA_BALANCING @@ -1313,6 +1324,9 @@ struct vm_special_mapping { int (*mremap)(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma); + + void (*close)(const struct vm_special_mapping *sm, + struct vm_area_struct *vma); }; enum tlb_flush_reason { diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h index a2f6179b672b..bff5706b76e1 100644 --- a/include/linux/mm_types_task.h +++ b/include/linux/mm_types_task.h @@ -16,9 +16,6 @@ #include <asm/tlbbatch.h> #endif -#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS) -#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \ - IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK)) #define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8) /* diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h index 2c7928a50907..f0ac2e469b32 100644 --- a/include/linux/mmc/core.h +++ b/include/linux/mmc/core.h @@ -11,18 +11,6 @@ struct mmc_data; struct mmc_request; -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, - MMC_BLK_NEW_REQUEST, -}; - struct mmc_command { u32 opcode; u32 arg; diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 88c6a76042ee..8fc2b328ec4d 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h @@ -10,6 +10,7 @@ #include <linux/sched.h> #include <linux/device.h> #include <linux/fault-inject.h> +#include <linux/debugfs.h> #include <linux/mmc/core.h> #include <linux/mmc/card.h> @@ -264,16 +265,6 @@ struct mmc_cqe_ops { void (*cqe_recovery_finish)(struct mmc_host *host); }; -struct mmc_async_req { - /* active mmc request */ - struct mmc_request *mrq; - /* - * Check error status of completed mmc request. - * Returns 0 if success otherwise non zero. 
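Further up in this line, struct vm_special_mapping gains a ->close() callback so the owner of a special mapping can be notified when its VMA is torn down. A hedged sketch of how a user of _install_special_mapping() might wire it up (all names here are illustrative):

```c
#include <linux/mm_types.h>

/* Illustrative only: release per-mapping state when the special VMA goes away. */
static void my_special_close(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma)
{
	/* e.g. drop a reference taken when _install_special_mapping() ran */
}

static const struct vm_special_mapping my_special_mapping = {
	.name  = "[my_special]",
	.close = my_special_close,
};
```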
- */ - enum mmc_blk_status (*err_check)(struct mmc_card *, struct mmc_async_req *); -}; - /** * struct mmc_slot - MMC slot functions * @@ -291,20 +282,6 @@ struct mmc_slot { void *handler_priv; }; -/** - * mmc_context_info - synchronization details for mmc context - * @is_done_rcv wake up reason was done request - * @is_new_req wake up reason was new request - * @is_waiting_last_req mmc context waiting for single running request - * @wait wait queue - */ -struct mmc_context_info { - bool is_done_rcv; - bool is_new_req; - bool is_waiting_last_req; - wait_queue_head_t wait; -}; - struct regulator; struct mmc_pwrseq; @@ -672,7 +649,8 @@ static inline void mmc_debugfs_err_stats_inc(struct mmc_host *host, host->err_stats[stat] += 1; } -int mmc_sd_switch(struct mmc_card *card, int mode, int group, u8 value, u8 *resp); +int mmc_sd_switch(struct mmc_card *card, bool mode, int group, + u8 value, u8 *resp); int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error); int mmc_send_abort_tuning(struct mmc_host *host, u32 opcode); diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 1dc6248feb83..17506e4a2835 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -666,11 +666,6 @@ enum zone_watermarks { #define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1)) #define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP) -#define min_wmark_pages(z) (z->_watermark[WMARK_MIN] + z->watermark_boost) -#define low_wmark_pages(z) (z->_watermark[WMARK_LOW] + z->watermark_boost) -#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost) -#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost) - /* * Flags used in pcp->flags field. * @@ -1016,6 +1011,32 @@ enum zone_flags { ZONE_BELOW_HIGH, /* zone is below high watermark. */ }; +static inline unsigned long wmark_pages(const struct zone *z, + enum zone_watermarks w) +{ + return z->_watermark[w] + z->watermark_boost; +} + +static inline unsigned long min_wmark_pages(const struct zone *z) +{ + return wmark_pages(z, WMARK_MIN); +} + +static inline unsigned long low_wmark_pages(const struct zone *z) +{ + return wmark_pages(z, WMARK_LOW); +} + +static inline unsigned long high_wmark_pages(const struct zone *z) +{ + return wmark_pages(z, WMARK_HIGH); +} + +static inline unsigned long promo_wmark_pages(const struct zone *z) +{ + return wmark_pages(z, WMARK_PROMO); +} + static inline unsigned long zone_managed_pages(struct zone *zone) { return (unsigned long)atomic_long_read(&zone->managed_pages); @@ -1688,7 +1709,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist, zone = zonelist_zone(z)) #define for_next_zone_zonelist_nodemask(zone, z, highidx, nodemask) \ - for (zone = z->zone; \ + for (zone = zonelist_zone(z); \ zone; \ z = next_zones_zonelist(++z, highidx, nodemask), \ zone = zonelist_zone(z)) @@ -1724,7 +1745,7 @@ static inline bool movable_only_nodes(nodemask_t *nodes) nid = first_node(*nodes); zonelist = &NODE_DATA(nid)->node_zonelists[ZONELIST_FALLBACK]; z = first_zones_zonelist(zonelist, ZONE_NORMAL, nodes); - return (!z->zone) ? true : false; + return (!zonelist_zone(z)) ? 
true : false; } diff --git a/include/linux/mnt_idmapping.h b/include/linux/mnt_idmapping.h index cd4d5c8781f5..b1b219bc3422 100644 --- a/include/linux/mnt_idmapping.h +++ b/include/linux/mnt_idmapping.h @@ -9,6 +9,7 @@ struct mnt_idmap; struct user_namespace; extern struct mnt_idmap nop_mnt_idmap; +extern struct mnt_idmap invalid_mnt_idmap; extern struct user_namespace init_user_ns; typedef struct { diff --git a/include/linux/mnt_namespace.h b/include/linux/mnt_namespace.h index 8f882f5881e8..70b366b64816 100644 --- a/include/linux/mnt_namespace.h +++ b/include/linux/mnt_namespace.h @@ -3,6 +3,9 @@ #define _NAMESPACE_H_ #ifdef __KERNEL__ +#include <linux/cleanup.h> +#include <linux/err.h> + struct mnt_namespace; struct fs_struct; struct user_namespace; @@ -11,6 +14,7 @@ struct ns_common; extern struct mnt_namespace *copy_mnt_ns(unsigned long, struct mnt_namespace *, struct user_namespace *, struct fs_struct *); extern void put_mnt_ns(struct mnt_namespace *ns); +DEFINE_FREE(put_mnt_ns, struct mnt_namespace *, if (!IS_ERR_OR_NULL(_T)) put_mnt_ns(_T)) extern struct ns_common *from_mnt_ns(struct mnt_namespace *); extern const struct file_operations proc_mounts_operations; diff --git a/include/linux/mpi.h b/include/linux/mpi.h index eb0d1c1db208..47be46f36435 100644 --- a/include/linux/mpi.h +++ b/include/linux/mpi.h @@ -40,79 +40,26 @@ struct gcry_mpi { typedef struct gcry_mpi *MPI; #define mpi_get_nlimbs(a) ((a)->nlimbs) -#define mpi_has_sign(a) ((a)->sign) /*-- mpiutil.c --*/ MPI mpi_alloc(unsigned nlimbs); -void mpi_clear(MPI a); void mpi_free(MPI a); int mpi_resize(MPI a, unsigned nlimbs); -static inline MPI mpi_new(unsigned int nbits) -{ - return mpi_alloc((nbits + BITS_PER_MPI_LIMB - 1) / BITS_PER_MPI_LIMB); -} - MPI mpi_copy(MPI a); -MPI mpi_alloc_like(MPI a); -void mpi_snatch(MPI w, MPI u); -MPI mpi_set(MPI w, MPI u); -MPI mpi_set_ui(MPI w, unsigned long u); -MPI mpi_alloc_set_ui(unsigned long u); -void mpi_swap_cond(MPI a, MPI b, unsigned long swap); - -/* Constants used to return constant MPIs. See mpi_init if you - * want to add more constants. - */ -#define MPI_NUMBER_OF_CONSTANTS 6 -enum gcry_mpi_constants { - MPI_C_ZERO, - MPI_C_ONE, - MPI_C_TWO, - MPI_C_THREE, - MPI_C_FOUR, - MPI_C_EIGHT -}; - -MPI mpi_const(enum gcry_mpi_constants no); /*-- mpicoder.c --*/ - -/* Different formats of external big integer representation. */ -enum gcry_mpi_format { - GCRYMPI_FMT_NONE = 0, - GCRYMPI_FMT_STD = 1, /* Twos complement stored without length. */ - GCRYMPI_FMT_PGP = 2, /* As used by OpenPGP (unsigned only). */ - GCRYMPI_FMT_SSH = 3, /* As used by SSH (like STD but with length). */ - GCRYMPI_FMT_HEX = 4, /* Hex format. */ - GCRYMPI_FMT_USG = 5, /* Like STD but unsigned. */ - GCRYMPI_FMT_OPAQUE = 8 /* Opaque format (some functions only). */ -}; - MPI mpi_read_raw_data(const void *xbuffer, size_t nbytes); MPI mpi_read_from_buffer(const void *buffer, unsigned *ret_nread); -int mpi_fromstr(MPI val, const char *str); -MPI mpi_scanval(const char *string); MPI mpi_read_raw_from_sgl(struct scatterlist *sgl, unsigned int len); void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign); int mpi_read_buffer(MPI a, uint8_t *buf, unsigned buf_len, unsigned *nbytes, int *sign); int mpi_write_to_sgl(MPI a, struct scatterlist *sg, unsigned nbytes, int *sign); -int mpi_print(enum gcry_mpi_format format, unsigned char *buffer, - size_t buflen, size_t *nwritten, MPI a); /*-- mpi-mod.c --*/ -void mpi_mod(MPI rem, MPI dividend, MPI divisor); - -/* Context used with Barrett reduction. 
*/ -struct barrett_ctx_s; -typedef struct barrett_ctx_s *mpi_barrett_t; - -mpi_barrett_t mpi_barrett_init(MPI m, int copy); -void mpi_barrett_free(mpi_barrett_t ctx); -void mpi_mod_barrett(MPI r, MPI x, mpi_barrett_t ctx); -void mpi_mul_barrett(MPI w, MPI u, MPI v, mpi_barrett_t ctx); +int mpi_mod(MPI rem, MPI dividend, MPI divisor); /*-- mpi-pow.c --*/ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); @@ -120,7 +67,6 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod); /*-- mpi-cmp.c --*/ int mpi_cmp_ui(MPI u, ulong v); int mpi_cmp(MPI u, MPI v); -int mpi_cmpabs(MPI u, MPI v); /*-- mpi-sub-ui.c --*/ int mpi_sub_ui(MPI w, MPI u, unsigned long vval); @@ -129,138 +75,22 @@ int mpi_sub_ui(MPI w, MPI u, unsigned long vval); void mpi_normalize(MPI a); unsigned mpi_get_nbits(MPI a); int mpi_test_bit(MPI a, unsigned int n); -void mpi_set_bit(MPI a, unsigned int n); -void mpi_set_highbit(MPI a, unsigned int n); -void mpi_clear_highbit(MPI a, unsigned int n); -void mpi_clear_bit(MPI a, unsigned int n); -void mpi_rshift_limbs(MPI a, unsigned int count); -void mpi_rshift(MPI x, MPI a, unsigned int n); -void mpi_lshift_limbs(MPI a, unsigned int count); -void mpi_lshift(MPI x, MPI a, unsigned int n); +int mpi_set_bit(MPI a, unsigned int n); +int mpi_rshift(MPI x, MPI a, unsigned int n); /*-- mpi-add.c --*/ -void mpi_add_ui(MPI w, MPI u, unsigned long v); -void mpi_add(MPI w, MPI u, MPI v); -void mpi_sub(MPI w, MPI u, MPI v); -void mpi_addm(MPI w, MPI u, MPI v, MPI m); -void mpi_subm(MPI w, MPI u, MPI v, MPI m); +int mpi_add(MPI w, MPI u, MPI v); +int mpi_sub(MPI w, MPI u, MPI v); +int mpi_addm(MPI w, MPI u, MPI v, MPI m); +int mpi_subm(MPI w, MPI u, MPI v, MPI m); /*-- mpi-mul.c --*/ -void mpi_mul(MPI w, MPI u, MPI v); -void mpi_mulm(MPI w, MPI u, MPI v, MPI m); +int mpi_mul(MPI w, MPI u, MPI v); +int mpi_mulm(MPI w, MPI u, MPI v, MPI m); /*-- mpi-div.c --*/ -void mpi_tdiv_r(MPI rem, MPI num, MPI den); -void mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); -void mpi_fdiv_q(MPI quot, MPI dividend, MPI divisor); - -/*-- mpi-inv.c --*/ -int mpi_invm(MPI x, MPI a, MPI n); - -/*-- ec.c --*/ - -/* Object to represent a point in projective coordinates */ -struct gcry_mpi_point { - MPI x; - MPI y; - MPI z; -}; - -typedef struct gcry_mpi_point *MPI_POINT; - -/* Models describing an elliptic curve */ -enum gcry_mpi_ec_models { - /* The Short Weierstrass equation is - * y^2 = x^3 + ax + b - */ - MPI_EC_WEIERSTRASS = 0, - /* The Montgomery equation is - * by^2 = x^3 + ax^2 + x - */ - MPI_EC_MONTGOMERY, - /* The Twisted Edwards equation is - * ax^2 + y^2 = 1 + bx^2y^2 - * Note that we use 'b' instead of the commonly used 'd'. - */ - MPI_EC_EDWARDS -}; - -/* Dialects used with elliptic curves */ -enum ecc_dialects { - ECC_DIALECT_STANDARD = 0, - ECC_DIALECT_ED25519, - ECC_DIALECT_SAFECURVE -}; - -/* This context is used with all our EC functions. */ -struct mpi_ec_ctx { - enum gcry_mpi_ec_models model; /* The model describing this curve. */ - enum ecc_dialects dialect; /* The ECC dialect used with the curve. */ - int flags; /* Public key flags (not always used). */ - unsigned int nbits; /* Number of bits. */ - - /* Domain parameters. Note that they may not all be set and if set - * the MPIs may be flagged as constant. - */ - MPI p; /* Prime specifying the field GF(p). */ - MPI a; /* First coefficient of the Weierstrass equation. */ - MPI b; /* Second coefficient of the Weierstrass equation. */ - MPI_POINT G; /* Base point (generator). */ - MPI n; /* Order of G. */ - unsigned int h; /* Cofactor. 
*/ - - /* The actual key. May not be set. */ - MPI_POINT Q; /* Public key. */ - MPI d; /* Private key. */ - - const char *name; /* Name of the curve. */ - - /* This structure is private to mpi/ec.c! */ - struct { - struct { - unsigned int a_is_pminus3:1; - unsigned int two_inv_p:1; - } valid; /* Flags to help setting the helper vars below. */ - - int a_is_pminus3; /* True if A = P - 3. */ - - MPI two_inv_p; - - mpi_barrett_t p_barrett; - - /* Scratch variables. */ - MPI scratch[11]; - - /* Helper for fast reduction. */ - /* int nist_nbits; /\* If this is a NIST curve, the # of bits. *\/ */ - /* MPI s[10]; */ - /* MPI c; */ - } t; - - /* Curve specific computation routines for the field. */ - void (*addm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*subm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ec); - void (*mulm)(MPI w, MPI u, MPI v, struct mpi_ec_ctx *ctx); - void (*pow2)(MPI w, const MPI b, struct mpi_ec_ctx *ctx); - void (*mul2)(MPI w, MPI u, struct mpi_ec_ctx *ctx); -}; - -void mpi_ec_init(struct mpi_ec_ctx *ctx, enum gcry_mpi_ec_models model, - enum ecc_dialects dialect, - int flags, MPI p, MPI a, MPI b); -void mpi_ec_deinit(struct mpi_ec_ctx *ctx); -MPI_POINT mpi_point_new(unsigned int nbits); -void mpi_point_release(MPI_POINT p); -void mpi_point_init(MPI_POINT p); -void mpi_point_free_parts(MPI_POINT p); -int mpi_ec_get_affine(MPI x, MPI y, MPI_POINT point, struct mpi_ec_ctx *ctx); -void mpi_ec_add_points(MPI_POINT result, - MPI_POINT p1, MPI_POINT p2, - struct mpi_ec_ctx *ctx); -void mpi_ec_mul_point(MPI_POINT result, - MPI scalar, MPI_POINT point, - struct mpi_ec_ctx *ctx); -int mpi_ec_curve_point(MPI_POINT point, struct mpi_ec_ctx *ctx); +int mpi_tdiv_r(MPI rem, MPI num, MPI den); +int mpi_fdiv_r(MPI rem, MPI dividend, MPI divisor); /* inline functions */ diff --git a/include/linux/msi.h b/include/linux/msi.h index 944979763825..b10093c4d00e 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h @@ -554,6 +554,8 @@ enum { MSI_FLAG_MSIX_CONTIGUOUS = (1 << 19), /* PCI/MSI-X vectors can be dynamically allocated/freed post MSI-X enable */ MSI_FLAG_PCI_MSIX_ALLOC_DYN = (1 << 20), + /* PCI MSIs cannot be steered separately to CPU cores */ + MSI_FLAG_NO_AFFINITY = (1 << 21), }; /** diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h index b2996dc987ff..1e4208040956 100644 --- a/include/linux/mtd/nand.h +++ b/include/linux/mtd/nand.h @@ -103,6 +103,8 @@ enum nand_page_io_req_type { * @ooblen: the number of OOB bytes to read from/write to this page * @oobbuf: buffer to store OOB data in or get OOB data from * @mode: one of the %MTD_OPS_XXX mode + * @continuous: no need to start over the operation at the end of each page, the + * NAND device will automatically prepare the next one * * This object is used to pass per-page I/O requests to NAND sub-layers. This * way all useful information are already formatted in a useful way and @@ -125,6 +127,7 @@ struct nand_page_io_req { void *in; } oobbuf; int mode; + bool continuous; }; const struct mtd_ooblayout_ops *nand_get_small_page_ooblayout(void); @@ -906,19 +909,19 @@ static inline void nanddev_pos_next_page(struct nand_device *nand, } /** - * nand_io_iter_init - Initialize a NAND I/O iterator + * nand_io_page_iter_init - Initialize a NAND I/O iterator * @nand: NAND device * @offs: absolute offset * @req: MTD request * @iter: NAND I/O iterator * * Initializes a NAND iterator based on the information passed by the MTD - * layer. + * layer for page jumps. 
*/ -static inline void nanddev_io_iter_init(struct nand_device *nand, - enum nand_page_io_req_type reqtype, - loff_t offs, struct mtd_oob_ops *req, - struct nand_io_iter *iter) +static inline void nanddev_io_page_iter_init(struct nand_device *nand, + enum nand_page_io_req_type reqtype, + loff_t offs, struct mtd_oob_ops *req, + struct nand_io_iter *iter) { struct mtd_info *mtd = nanddev_to_mtd(nand); @@ -937,6 +940,43 @@ static inline void nanddev_io_iter_init(struct nand_device *nand, iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page - iter->req.ooboffs, iter->oobleft); + iter->req.continuous = false; +} + +/** + * nand_io_block_iter_init - Initialize a NAND I/O iterator + * @nand: NAND device + * @offs: absolute offset + * @req: MTD request + * @iter: NAND I/O iterator + * + * Initializes a NAND iterator based on the information passed by the MTD + * layer for block jumps (no OOB) + * + * In practice only reads may leverage this iterator. + */ +static inline void nanddev_io_block_iter_init(struct nand_device *nand, + enum nand_page_io_req_type reqtype, + loff_t offs, struct mtd_oob_ops *req, + struct nand_io_iter *iter) +{ + unsigned int offs_in_eb; + + iter->req.type = reqtype; + iter->req.mode = req->mode; + iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos); + iter->req.ooboffs = 0; + iter->oobbytes_per_page = 0; + iter->dataleft = req->len; + iter->oobleft = 0; + iter->req.databuf.in = req->datbuf; + offs_in_eb = (nand->memorg.pagesize * iter->req.pos.page) + iter->req.dataoffs; + iter->req.datalen = min_t(unsigned int, + nanddev_eraseblock_size(nand) - offs_in_eb, + iter->dataleft); + iter->req.oobbuf.in = NULL; + iter->req.ooblen = 0; + iter->req.continuous = true; } /** @@ -963,6 +1003,25 @@ static inline void nanddev_io_iter_next_page(struct nand_device *nand, } /** + * nand_io_iter_next_block - Move to the next block + * @nand: NAND device + * @iter: NAND I/O iterator + * + * Updates the @iter to point to the next block. + * No OOB handling available. + */ +static inline void nanddev_io_iter_next_block(struct nand_device *nand, + struct nand_io_iter *iter) +{ + nanddev_pos_next_eraseblock(nand, &iter->req.pos); + iter->dataleft -= iter->req.datalen; + iter->req.databuf.in += iter->req.datalen; + iter->req.dataoffs = 0; + iter->req.datalen = min_t(unsigned int, nanddev_eraseblock_size(nand), + iter->dataleft); +} + +/** * nand_io_iter_end - Should end iteration or not * @nand: NAND device * @iter: NAND I/O iterator @@ -990,13 +1049,28 @@ static inline bool nanddev_io_iter_end(struct nand_device *nand, * @req: MTD I/O request * @iter: NAND I/O iterator * - * Should be used for iterate over pages that are contained in an MTD request. + * Should be used for iterating over pages that are contained in an MTD request. */ #define nanddev_io_for_each_page(nand, type, start, req, iter) \ - for (nanddev_io_iter_init(nand, type, start, req, iter); \ + for (nanddev_io_page_iter_init(nand, type, start, req, iter); \ !nanddev_io_iter_end(nand, iter); \ nanddev_io_iter_next_page(nand, iter)) +/** + * nand_io_for_each_block - Iterate over all NAND pages contained in an MTD I/O + * request, one block at a time + * @nand: NAND device + * @start: start address to read/write from + * @req: MTD I/O request + * @iter: NAND I/O iterator + * + * Should be used for iterating over blocks that are contained in an MTD request. 
+ */ +#define nanddev_io_for_each_block(nand, type, start, req, iter) \ + for (nanddev_io_block_iter_init(nand, type, start, req, iter); \ + !nanddev_io_iter_end(nand, iter); \ + nanddev_io_iter_next_block(nand, iter)) + bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos); bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos); int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos); diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 5c19ead60499..702e5fb13dae 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -312,6 +312,8 @@ struct spinand_ecc_info { #define SPINAND_HAS_QE_BIT BIT(0) #define SPINAND_HAS_CR_FEAT_BIT BIT(1) +#define SPINAND_HAS_PROG_PLANE_SELECT_BIT BIT(2) +#define SPINAND_HAS_READ_PLANE_SELECT_BIT BIT(3) /** * struct spinand_ondie_ecc_conf - private SPI-NAND on-die ECC engine structure @@ -336,6 +338,7 @@ struct spinand_ondie_ecc_conf { * @op_variants.update_cache: variants of the update-cache operation * @select_target: function used to select a target/die. Required only for * multi-die chips + * @set_cont_read: enable/disable continuous cached reads * * Each SPI NAND manufacturer driver should have a spinand_info table * describing all the chips supported by the driver. @@ -354,6 +357,8 @@ struct spinand_info { } op_variants; int (*select_target)(struct spinand_device *spinand, unsigned int target); + int (*set_cont_read)(struct spinand_device *spinand, + bool enable); }; #define SPINAND_ID(__method, ...) \ @@ -379,6 +384,9 @@ struct spinand_info { #define SPINAND_SELECT_TARGET(__func) \ .select_target = __func, +#define SPINAND_CONT_READ(__set_cont_read) \ + .set_cont_read = __set_cont_read, + #define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \ __flags, ...) \ { \ @@ -422,6 +430,12 @@ struct spinand_dirmap { * passed in spi_mem_op be DMA-able, so we can't based the bufs on * the stack * @manufacturer: SPI NAND manufacturer information + * @cont_read_possible: Field filled by the core once the whole system + * configuration is known to tell whether continuous reads are + * suitable to use or not in general with this chip/configuration. + * A per-transfer check must of course be done to ensure it is + * actually relevant to enable this feature. + * @set_cont_read: Enable/disable the continuous read feature * @priv: manufacturer private data */ struct spinand_device { @@ -451,6 +465,10 @@ struct spinand_device { u8 *scratchbuf; const struct spinand_manufacturer *manufacturer; void *priv; + + bool cont_read_possible; + int (*set_cont_read)(struct spinand_device *spinand, + bool enable); }; /** @@ -517,6 +535,7 @@ int spinand_match_and_init(struct spinand_device *spinand, enum spinand_readid_method rdid_method); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); +int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val); int spinand_select_target(struct spinand_device *spinand, unsigned int target); #endif /* __LINUX_MTD_SPINAND_H */ diff --git a/include/linux/mv643xx.h b/include/linux/mv643xx.h deleted file mode 100644 index 000b126acfb6..000000000000 --- a/include/linux/mv643xx.h +++ /dev/null @@ -1,921 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * mv643xx.h - MV-643XX Internal registers definition file. - * - * Copyright 2002 Momentum Computer, Inc. - * Author: Matthew Dharm <mdharm@momenco.com> - * Copyright 2002 GALILEO TECHNOLOGY, LTD. 
- */ -#ifndef __ASM_MV643XX_H -#define __ASM_MV643XX_H - -#include <asm/types.h> -#include <linux/mv643xx_eth.h> -#include <linux/mv643xx_i2c.h> - -/****************************************/ -/* Processor Address Space */ -/****************************************/ - -/* DDR SDRAM BAR and size registers */ - -#define MV64340_CS_0_BASE_ADDR 0x008 -#define MV64340_CS_0_SIZE 0x010 -#define MV64340_CS_1_BASE_ADDR 0x208 -#define MV64340_CS_1_SIZE 0x210 -#define MV64340_CS_2_BASE_ADDR 0x018 -#define MV64340_CS_2_SIZE 0x020 -#define MV64340_CS_3_BASE_ADDR 0x218 -#define MV64340_CS_3_SIZE 0x220 - -/* Devices BAR and size registers */ - -#define MV64340_DEV_CS0_BASE_ADDR 0x028 -#define MV64340_DEV_CS0_SIZE 0x030 -#define MV64340_DEV_CS1_BASE_ADDR 0x228 -#define MV64340_DEV_CS1_SIZE 0x230 -#define MV64340_DEV_CS2_BASE_ADDR 0x248 -#define MV64340_DEV_CS2_SIZE 0x250 -#define MV64340_DEV_CS3_BASE_ADDR 0x038 -#define MV64340_DEV_CS3_SIZE 0x040 -#define MV64340_BOOTCS_BASE_ADDR 0x238 -#define MV64340_BOOTCS_SIZE 0x240 - -/* PCI 0 BAR and size registers */ - -#define MV64340_PCI_0_IO_BASE_ADDR 0x048 -#define MV64340_PCI_0_IO_SIZE 0x050 -#define MV64340_PCI_0_MEMORY0_BASE_ADDR 0x058 -#define MV64340_PCI_0_MEMORY0_SIZE 0x060 -#define MV64340_PCI_0_MEMORY1_BASE_ADDR 0x080 -#define MV64340_PCI_0_MEMORY1_SIZE 0x088 -#define MV64340_PCI_0_MEMORY2_BASE_ADDR 0x258 -#define MV64340_PCI_0_MEMORY2_SIZE 0x260 -#define MV64340_PCI_0_MEMORY3_BASE_ADDR 0x280 -#define MV64340_PCI_0_MEMORY3_SIZE 0x288 - -/* PCI 1 BAR and size registers */ -#define MV64340_PCI_1_IO_BASE_ADDR 0x090 -#define MV64340_PCI_1_IO_SIZE 0x098 -#define MV64340_PCI_1_MEMORY0_BASE_ADDR 0x0a0 -#define MV64340_PCI_1_MEMORY0_SIZE 0x0a8 -#define MV64340_PCI_1_MEMORY1_BASE_ADDR 0x0b0 -#define MV64340_PCI_1_MEMORY1_SIZE 0x0b8 -#define MV64340_PCI_1_MEMORY2_BASE_ADDR 0x2a0 -#define MV64340_PCI_1_MEMORY2_SIZE 0x2a8 -#define MV64340_PCI_1_MEMORY3_BASE_ADDR 0x2b0 -#define MV64340_PCI_1_MEMORY3_SIZE 0x2b8 - -/* SRAM base address */ -#define MV64340_INTEGRATED_SRAM_BASE_ADDR 0x268 - -/* internal registers space base address */ -#define MV64340_INTERNAL_SPACE_BASE_ADDR 0x068 - -/* Enables the CS , DEV_CS , PCI 0 and PCI 1 - windows above */ -#define MV64340_BASE_ADDR_ENABLE 0x278 - -/****************************************/ -/* PCI remap registers */ -/****************************************/ - /* PCI 0 */ -#define MV64340_PCI_0_IO_ADDR_REMAP 0x0f0 -#define MV64340_PCI_0_MEMORY0_LOW_ADDR_REMAP 0x0f8 -#define MV64340_PCI_0_MEMORY0_HIGH_ADDR_REMAP 0x320 -#define MV64340_PCI_0_MEMORY1_LOW_ADDR_REMAP 0x100 -#define MV64340_PCI_0_MEMORY1_HIGH_ADDR_REMAP 0x328 -#define MV64340_PCI_0_MEMORY2_LOW_ADDR_REMAP 0x2f8 -#define MV64340_PCI_0_MEMORY2_HIGH_ADDR_REMAP 0x330 -#define MV64340_PCI_0_MEMORY3_LOW_ADDR_REMAP 0x300 -#define MV64340_PCI_0_MEMORY3_HIGH_ADDR_REMAP 0x338 - /* PCI 1 */ -#define MV64340_PCI_1_IO_ADDR_REMAP 0x108 -#define MV64340_PCI_1_MEMORY0_LOW_ADDR_REMAP 0x110 -#define MV64340_PCI_1_MEMORY0_HIGH_ADDR_REMAP 0x340 -#define MV64340_PCI_1_MEMORY1_LOW_ADDR_REMAP 0x118 -#define MV64340_PCI_1_MEMORY1_HIGH_ADDR_REMAP 0x348 -#define MV64340_PCI_1_MEMORY2_LOW_ADDR_REMAP 0x310 -#define MV64340_PCI_1_MEMORY2_HIGH_ADDR_REMAP 0x350 -#define MV64340_PCI_1_MEMORY3_LOW_ADDR_REMAP 0x318 -#define MV64340_PCI_1_MEMORY3_HIGH_ADDR_REMAP 0x358 - -#define MV64340_CPU_PCI_0_HEADERS_RETARGET_CONTROL 0x3b0 -#define MV64340_CPU_PCI_0_HEADERS_RETARGET_BASE 0x3b8 -#define MV64340_CPU_PCI_1_HEADERS_RETARGET_CONTROL 0x3c0 -#define MV64340_CPU_PCI_1_HEADERS_RETARGET_BASE 0x3c8 -#define 
MV64340_CPU_GE_HEADERS_RETARGET_CONTROL 0x3d0 -#define MV64340_CPU_GE_HEADERS_RETARGET_BASE 0x3d8 -#define MV64340_CPU_IDMA_HEADERS_RETARGET_CONTROL 0x3e0 -#define MV64340_CPU_IDMA_HEADERS_RETARGET_BASE 0x3e8 - -/****************************************/ -/* CPU Control Registers */ -/****************************************/ - -#define MV64340_CPU_CONFIG 0x000 -#define MV64340_CPU_MODE 0x120 -#define MV64340_CPU_MASTER_CONTROL 0x160 -#define MV64340_CPU_CROSS_BAR_CONTROL_LOW 0x150 -#define MV64340_CPU_CROSS_BAR_CONTROL_HIGH 0x158 -#define MV64340_CPU_CROSS_BAR_TIMEOUT 0x168 - -/****************************************/ -/* SMP RegisterS */ -/****************************************/ - -#define MV64340_SMP_WHO_AM_I 0x200 -#define MV64340_SMP_CPU0_DOORBELL 0x214 -#define MV64340_SMP_CPU0_DOORBELL_CLEAR 0x21C -#define MV64340_SMP_CPU1_DOORBELL 0x224 -#define MV64340_SMP_CPU1_DOORBELL_CLEAR 0x22C -#define MV64340_SMP_CPU0_DOORBELL_MASK 0x234 -#define MV64340_SMP_CPU1_DOORBELL_MASK 0x23C -#define MV64340_SMP_SEMAPHOR0 0x244 -#define MV64340_SMP_SEMAPHOR1 0x24c -#define MV64340_SMP_SEMAPHOR2 0x254 -#define MV64340_SMP_SEMAPHOR3 0x25c -#define MV64340_SMP_SEMAPHOR4 0x264 -#define MV64340_SMP_SEMAPHOR5 0x26c -#define MV64340_SMP_SEMAPHOR6 0x274 -#define MV64340_SMP_SEMAPHOR7 0x27c - -/****************************************/ -/* CPU Sync Barrier Register */ -/****************************************/ - -#define MV64340_CPU_0_SYNC_BARRIER_TRIGGER 0x0c0 -#define MV64340_CPU_0_SYNC_BARRIER_VIRTUAL 0x0c8 -#define MV64340_CPU_1_SYNC_BARRIER_TRIGGER 0x0d0 -#define MV64340_CPU_1_SYNC_BARRIER_VIRTUAL 0x0d8 - -/****************************************/ -/* CPU Access Protect */ -/****************************************/ - -#define MV64340_CPU_PROTECT_WINDOW_0_BASE_ADDR 0x180 -#define MV64340_CPU_PROTECT_WINDOW_0_SIZE 0x188 -#define MV64340_CPU_PROTECT_WINDOW_1_BASE_ADDR 0x190 -#define MV64340_CPU_PROTECT_WINDOW_1_SIZE 0x198 -#define MV64340_CPU_PROTECT_WINDOW_2_BASE_ADDR 0x1a0 -#define MV64340_CPU_PROTECT_WINDOW_2_SIZE 0x1a8 -#define MV64340_CPU_PROTECT_WINDOW_3_BASE_ADDR 0x1b0 -#define MV64340_CPU_PROTECT_WINDOW_3_SIZE 0x1b8 - - -/****************************************/ -/* CPU Error Report */ -/****************************************/ - -#define MV64340_CPU_ERROR_ADDR_LOW 0x070 -#define MV64340_CPU_ERROR_ADDR_HIGH 0x078 -#define MV64340_CPU_ERROR_DATA_LOW 0x128 -#define MV64340_CPU_ERROR_DATA_HIGH 0x130 -#define MV64340_CPU_ERROR_PARITY 0x138 -#define MV64340_CPU_ERROR_CAUSE 0x140 -#define MV64340_CPU_ERROR_MASK 0x148 - -/****************************************/ -/* CPU Interface Debug Registers */ -/****************************************/ - -#define MV64340_PUNIT_SLAVE_DEBUG_LOW 0x360 -#define MV64340_PUNIT_SLAVE_DEBUG_HIGH 0x368 -#define MV64340_PUNIT_MASTER_DEBUG_LOW 0x370 -#define MV64340_PUNIT_MASTER_DEBUG_HIGH 0x378 -#define MV64340_PUNIT_MMASK 0x3e4 - -/****************************************/ -/* Integrated SRAM Registers */ -/****************************************/ - -#define MV64340_SRAM_CONFIG 0x380 -#define MV64340_SRAM_TEST_MODE 0X3F4 -#define MV64340_SRAM_ERROR_CAUSE 0x388 -#define MV64340_SRAM_ERROR_ADDR 0x390 -#define MV64340_SRAM_ERROR_ADDR_HIGH 0X3F8 -#define MV64340_SRAM_ERROR_DATA_LOW 0x398 -#define MV64340_SRAM_ERROR_DATA_HIGH 0x3a0 -#define MV64340_SRAM_ERROR_DATA_PARITY 0x3a8 - -/****************************************/ -/* SDRAM Configuration */ -/****************************************/ - -#define MV64340_SDRAM_CONFIG 0x1400 -#define MV64340_D_UNIT_CONTROL_LOW 
0x1404 -#define MV64340_D_UNIT_CONTROL_HIGH 0x1424 -#define MV64340_SDRAM_TIMING_CONTROL_LOW 0x1408 -#define MV64340_SDRAM_TIMING_CONTROL_HIGH 0x140c -#define MV64340_SDRAM_ADDR_CONTROL 0x1410 -#define MV64340_SDRAM_OPEN_PAGES_CONTROL 0x1414 -#define MV64340_SDRAM_OPERATION 0x1418 -#define MV64340_SDRAM_MODE 0x141c -#define MV64340_EXTENDED_DRAM_MODE 0x1420 -#define MV64340_SDRAM_CROSS_BAR_CONTROL_LOW 0x1430 -#define MV64340_SDRAM_CROSS_BAR_CONTROL_HIGH 0x1434 -#define MV64340_SDRAM_CROSS_BAR_TIMEOUT 0x1438 -#define MV64340_SDRAM_ADDR_CTRL_PADS_CALIBRATION 0x14c0 -#define MV64340_SDRAM_DATA_PADS_CALIBRATION 0x14c4 - -/****************************************/ -/* SDRAM Error Report */ -/****************************************/ - -#define MV64340_SDRAM_ERROR_DATA_LOW 0x1444 -#define MV64340_SDRAM_ERROR_DATA_HIGH 0x1440 -#define MV64340_SDRAM_ERROR_ADDR 0x1450 -#define MV64340_SDRAM_RECEIVED_ECC 0x1448 -#define MV64340_SDRAM_CALCULATED_ECC 0x144c -#define MV64340_SDRAM_ECC_CONTROL 0x1454 -#define MV64340_SDRAM_ECC_ERROR_COUNTER 0x1458 - -/******************************************/ -/* Controlled Delay Line (CDL) Registers */ -/******************************************/ - -#define MV64340_DFCDL_CONFIG0 0x1480 -#define MV64340_DFCDL_CONFIG1 0x1484 -#define MV64340_DLL_WRITE 0x1488 -#define MV64340_DLL_READ 0x148c -#define MV64340_SRAM_ADDR 0x1490 -#define MV64340_SRAM_DATA0 0x1494 -#define MV64340_SRAM_DATA1 0x1498 -#define MV64340_SRAM_DATA2 0x149c -#define MV64340_DFCL_PROBE 0x14a0 - -/******************************************/ -/* Debug Registers */ -/******************************************/ - -#define MV64340_DUNIT_DEBUG_LOW 0x1460 -#define MV64340_DUNIT_DEBUG_HIGH 0x1464 -#define MV64340_DUNIT_MMASK 0X1b40 - -/****************************************/ -/* Device Parameters */ -/****************************************/ - -#define MV64340_DEVICE_BANK0_PARAMETERS 0x45c -#define MV64340_DEVICE_BANK1_PARAMETERS 0x460 -#define MV64340_DEVICE_BANK2_PARAMETERS 0x464 -#define MV64340_DEVICE_BANK3_PARAMETERS 0x468 -#define MV64340_DEVICE_BOOT_BANK_PARAMETERS 0x46c -#define MV64340_DEVICE_INTERFACE_CONTROL 0x4c0 -#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_LOW 0x4c8 -#define MV64340_DEVICE_INTERFACE_CROSS_BAR_CONTROL_HIGH 0x4cc -#define MV64340_DEVICE_INTERFACE_CROSS_BAR_TIMEOUT 0x4c4 - -/****************************************/ -/* Device interrupt registers */ -/****************************************/ - -#define MV64340_DEVICE_INTERRUPT_CAUSE 0x4d0 -#define MV64340_DEVICE_INTERRUPT_MASK 0x4d4 -#define MV64340_DEVICE_ERROR_ADDR 0x4d8 -#define MV64340_DEVICE_ERROR_DATA 0x4dc -#define MV64340_DEVICE_ERROR_PARITY 0x4e0 - -/****************************************/ -/* Device debug registers */ -/****************************************/ - -#define MV64340_DEVICE_DEBUG_LOW 0x4e4 -#define MV64340_DEVICE_DEBUG_HIGH 0x4e8 -#define MV64340_RUNIT_MMASK 0x4f0 - -/****************************************/ -/* PCI Slave Address Decoding registers */ -/****************************************/ - -#define MV64340_PCI_0_CS_0_BANK_SIZE 0xc08 -#define MV64340_PCI_1_CS_0_BANK_SIZE 0xc88 -#define MV64340_PCI_0_CS_1_BANK_SIZE 0xd08 -#define MV64340_PCI_1_CS_1_BANK_SIZE 0xd88 -#define MV64340_PCI_0_CS_2_BANK_SIZE 0xc0c -#define MV64340_PCI_1_CS_2_BANK_SIZE 0xc8c -#define MV64340_PCI_0_CS_3_BANK_SIZE 0xd0c -#define MV64340_PCI_1_CS_3_BANK_SIZE 0xd8c -#define MV64340_PCI_0_DEVCS_0_BANK_SIZE 0xc10 -#define MV64340_PCI_1_DEVCS_0_BANK_SIZE 0xc90 -#define MV64340_PCI_0_DEVCS_1_BANK_SIZE 0xd10 -#define 
MV64340_PCI_1_DEVCS_1_BANK_SIZE 0xd90 -#define MV64340_PCI_0_DEVCS_2_BANK_SIZE 0xd18 -#define MV64340_PCI_1_DEVCS_2_BANK_SIZE 0xd98 -#define MV64340_PCI_0_DEVCS_3_BANK_SIZE 0xc14 -#define MV64340_PCI_1_DEVCS_3_BANK_SIZE 0xc94 -#define MV64340_PCI_0_DEVCS_BOOT_BANK_SIZE 0xd14 -#define MV64340_PCI_1_DEVCS_BOOT_BANK_SIZE 0xd94 -#define MV64340_PCI_0_P2P_MEM0_BAR_SIZE 0xd1c -#define MV64340_PCI_1_P2P_MEM0_BAR_SIZE 0xd9c -#define MV64340_PCI_0_P2P_MEM1_BAR_SIZE 0xd20 -#define MV64340_PCI_1_P2P_MEM1_BAR_SIZE 0xda0 -#define MV64340_PCI_0_P2P_I_O_BAR_SIZE 0xd24 -#define MV64340_PCI_1_P2P_I_O_BAR_SIZE 0xda4 -#define MV64340_PCI_0_CPU_BAR_SIZE 0xd28 -#define MV64340_PCI_1_CPU_BAR_SIZE 0xda8 -#define MV64340_PCI_0_INTERNAL_SRAM_BAR_SIZE 0xe00 -#define MV64340_PCI_1_INTERNAL_SRAM_BAR_SIZE 0xe80 -#define MV64340_PCI_0_EXPANSION_ROM_BAR_SIZE 0xd2c -#define MV64340_PCI_1_EXPANSION_ROM_BAR_SIZE 0xd9c -#define MV64340_PCI_0_BASE_ADDR_REG_ENABLE 0xc3c -#define MV64340_PCI_1_BASE_ADDR_REG_ENABLE 0xcbc -#define MV64340_PCI_0_CS_0_BASE_ADDR_REMAP 0xc48 -#define MV64340_PCI_1_CS_0_BASE_ADDR_REMAP 0xcc8 -#define MV64340_PCI_0_CS_1_BASE_ADDR_REMAP 0xd48 -#define MV64340_PCI_1_CS_1_BASE_ADDR_REMAP 0xdc8 -#define MV64340_PCI_0_CS_2_BASE_ADDR_REMAP 0xc4c -#define MV64340_PCI_1_CS_2_BASE_ADDR_REMAP 0xccc -#define MV64340_PCI_0_CS_3_BASE_ADDR_REMAP 0xd4c -#define MV64340_PCI_1_CS_3_BASE_ADDR_REMAP 0xdcc -#define MV64340_PCI_0_CS_0_BASE_HIGH_ADDR_REMAP 0xF04 -#define MV64340_PCI_1_CS_0_BASE_HIGH_ADDR_REMAP 0xF84 -#define MV64340_PCI_0_CS_1_BASE_HIGH_ADDR_REMAP 0xF08 -#define MV64340_PCI_1_CS_1_BASE_HIGH_ADDR_REMAP 0xF88 -#define MV64340_PCI_0_CS_2_BASE_HIGH_ADDR_REMAP 0xF0C -#define MV64340_PCI_1_CS_2_BASE_HIGH_ADDR_REMAP 0xF8C -#define MV64340_PCI_0_CS_3_BASE_HIGH_ADDR_REMAP 0xF10 -#define MV64340_PCI_1_CS_3_BASE_HIGH_ADDR_REMAP 0xF90 -#define MV64340_PCI_0_DEVCS_0_BASE_ADDR_REMAP 0xc50 -#define MV64340_PCI_1_DEVCS_0_BASE_ADDR_REMAP 0xcd0 -#define MV64340_PCI_0_DEVCS_1_BASE_ADDR_REMAP 0xd50 -#define MV64340_PCI_1_DEVCS_1_BASE_ADDR_REMAP 0xdd0 -#define MV64340_PCI_0_DEVCS_2_BASE_ADDR_REMAP 0xd58 -#define MV64340_PCI_1_DEVCS_2_BASE_ADDR_REMAP 0xdd8 -#define MV64340_PCI_0_DEVCS_3_BASE_ADDR_REMAP 0xc54 -#define MV64340_PCI_1_DEVCS_3_BASE_ADDR_REMAP 0xcd4 -#define MV64340_PCI_0_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xd54 -#define MV64340_PCI_1_DEVCS_BOOTCS_BASE_ADDR_REMAP 0xdd4 -#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xd5c -#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_LOW 0xddc -#define MV64340_PCI_0_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xd60 -#define MV64340_PCI_1_P2P_MEM0_BASE_ADDR_REMAP_HIGH 0xde0 -#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xd64 -#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_LOW 0xde4 -#define MV64340_PCI_0_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xd68 -#define MV64340_PCI_1_P2P_MEM1_BASE_ADDR_REMAP_HIGH 0xde8 -#define MV64340_PCI_0_P2P_I_O_BASE_ADDR_REMAP 0xd6c -#define MV64340_PCI_1_P2P_I_O_BASE_ADDR_REMAP 0xdec -#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_LOW 0xd70 -#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_LOW 0xdf0 -#define MV64340_PCI_0_CPU_BASE_ADDR_REMAP_HIGH 0xd74 -#define MV64340_PCI_1_CPU_BASE_ADDR_REMAP_HIGH 0xdf4 -#define MV64340_PCI_0_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf00 -#define MV64340_PCI_1_INTEGRATED_SRAM_BASE_ADDR_REMAP 0xf80 -#define MV64340_PCI_0_EXPANSION_ROM_BASE_ADDR_REMAP 0xf38 -#define MV64340_PCI_1_EXPANSION_ROM_BASE_ADDR_REMAP 0xfb8 -#define MV64340_PCI_0_ADDR_DECODE_CONTROL 0xd3c -#define MV64340_PCI_1_ADDR_DECODE_CONTROL 0xdbc -#define MV64340_PCI_0_HEADERS_RETARGET_CONTROL 
0xF40 -#define MV64340_PCI_1_HEADERS_RETARGET_CONTROL 0xFc0 -#define MV64340_PCI_0_HEADERS_RETARGET_BASE 0xF44 -#define MV64340_PCI_1_HEADERS_RETARGET_BASE 0xFc4 -#define MV64340_PCI_0_HEADERS_RETARGET_HIGH 0xF48 -#define MV64340_PCI_1_HEADERS_RETARGET_HIGH 0xFc8 - -/***********************************/ -/* PCI Control Register Map */ -/***********************************/ - -#define MV64340_PCI_0_DLL_STATUS_AND_COMMAND 0x1d20 -#define MV64340_PCI_1_DLL_STATUS_AND_COMMAND 0x1da0 -#define MV64340_PCI_0_MPP_PADS_DRIVE_CONTROL 0x1d1C -#define MV64340_PCI_1_MPP_PADS_DRIVE_CONTROL 0x1d9C -#define MV64340_PCI_0_COMMAND 0xc00 -#define MV64340_PCI_1_COMMAND 0xc80 -#define MV64340_PCI_0_MODE 0xd00 -#define MV64340_PCI_1_MODE 0xd80 -#define MV64340_PCI_0_RETRY 0xc04 -#define MV64340_PCI_1_RETRY 0xc84 -#define MV64340_PCI_0_READ_BUFFER_DISCARD_TIMER 0xd04 -#define MV64340_PCI_1_READ_BUFFER_DISCARD_TIMER 0xd84 -#define MV64340_PCI_0_MSI_TRIGGER_TIMER 0xc38 -#define MV64340_PCI_1_MSI_TRIGGER_TIMER 0xcb8 -#define MV64340_PCI_0_ARBITER_CONTROL 0x1d00 -#define MV64340_PCI_1_ARBITER_CONTROL 0x1d80 -#define MV64340_PCI_0_CROSS_BAR_CONTROL_LOW 0x1d08 -#define MV64340_PCI_1_CROSS_BAR_CONTROL_LOW 0x1d88 -#define MV64340_PCI_0_CROSS_BAR_CONTROL_HIGH 0x1d0c -#define MV64340_PCI_1_CROSS_BAR_CONTROL_HIGH 0x1d8c -#define MV64340_PCI_0_CROSS_BAR_TIMEOUT 0x1d04 -#define MV64340_PCI_1_CROSS_BAR_TIMEOUT 0x1d84 -#define MV64340_PCI_0_SYNC_BARRIER_TRIGGER_REG 0x1D18 -#define MV64340_PCI_1_SYNC_BARRIER_TRIGGER_REG 0x1D98 -#define MV64340_PCI_0_SYNC_BARRIER_VIRTUAL_REG 0x1d10 -#define MV64340_PCI_1_SYNC_BARRIER_VIRTUAL_REG 0x1d90 -#define MV64340_PCI_0_P2P_CONFIG 0x1d14 -#define MV64340_PCI_1_P2P_CONFIG 0x1d94 - -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_LOW 0x1e00 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_0_HIGH 0x1e04 -#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_0 0x1e08 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_LOW 0x1e10 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_1_HIGH 0x1e14 -#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_1 0x1e18 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_LOW 0x1e20 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_2_HIGH 0x1e24 -#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_2 0x1e28 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_LOW 0x1e30 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_3_HIGH 0x1e34 -#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_3 0x1e38 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_LOW 0x1e40 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_4_HIGH 0x1e44 -#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_4 0x1e48 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_LOW 0x1e50 -#define MV64340_PCI_0_ACCESS_CONTROL_BASE_5_HIGH 0x1e54 -#define MV64340_PCI_0_ACCESS_CONTROL_SIZE_5 0x1e58 - -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_LOW 0x1e80 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_0_HIGH 0x1e84 -#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_0 0x1e88 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_LOW 0x1e90 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_1_HIGH 0x1e94 -#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_1 0x1e98 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_LOW 0x1ea0 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_2_HIGH 0x1ea4 -#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_2 0x1ea8 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_LOW 0x1eb0 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_3_HIGH 0x1eb4 -#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_3 0x1eb8 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_LOW 0x1ec0 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_4_HIGH 0x1ec4 -#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_4 0x1ec8 -#define 
MV64340_PCI_1_ACCESS_CONTROL_BASE_5_LOW 0x1ed0 -#define MV64340_PCI_1_ACCESS_CONTROL_BASE_5_HIGH 0x1ed4 -#define MV64340_PCI_1_ACCESS_CONTROL_SIZE_5 0x1ed8 - -/****************************************/ -/* PCI Configuration Access Registers */ -/****************************************/ - -#define MV64340_PCI_0_CONFIG_ADDR 0xcf8 -#define MV64340_PCI_0_CONFIG_DATA_VIRTUAL_REG 0xcfc -#define MV64340_PCI_1_CONFIG_ADDR 0xc78 -#define MV64340_PCI_1_CONFIG_DATA_VIRTUAL_REG 0xc7c -#define MV64340_PCI_0_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xc34 -#define MV64340_PCI_1_INTERRUPT_ACKNOWLEDGE_VIRTUAL_REG 0xcb4 - -/****************************************/ -/* PCI Error Report Registers */ -/****************************************/ - -#define MV64340_PCI_0_SERR_MASK 0xc28 -#define MV64340_PCI_1_SERR_MASK 0xca8 -#define MV64340_PCI_0_ERROR_ADDR_LOW 0x1d40 -#define MV64340_PCI_1_ERROR_ADDR_LOW 0x1dc0 -#define MV64340_PCI_0_ERROR_ADDR_HIGH 0x1d44 -#define MV64340_PCI_1_ERROR_ADDR_HIGH 0x1dc4 -#define MV64340_PCI_0_ERROR_ATTRIBUTE 0x1d48 -#define MV64340_PCI_1_ERROR_ATTRIBUTE 0x1dc8 -#define MV64340_PCI_0_ERROR_COMMAND 0x1d50 -#define MV64340_PCI_1_ERROR_COMMAND 0x1dd0 -#define MV64340_PCI_0_ERROR_CAUSE 0x1d58 -#define MV64340_PCI_1_ERROR_CAUSE 0x1dd8 -#define MV64340_PCI_0_ERROR_MASK 0x1d5c -#define MV64340_PCI_1_ERROR_MASK 0x1ddc - -/****************************************/ -/* PCI Debug Registers */ -/****************************************/ - -#define MV64340_PCI_0_MMASK 0X1D24 -#define MV64340_PCI_1_MMASK 0X1DA4 - -/*********************************************/ -/* PCI Configuration, Function 0, Registers */ -/*********************************************/ - -#define MV64340_PCI_DEVICE_AND_VENDOR_ID 0x000 -#define MV64340_PCI_STATUS_AND_COMMAND 0x004 -#define MV64340_PCI_CLASS_CODE_AND_REVISION_ID 0x008 -#define MV64340_PCI_BIST_HEADER_TYPE_LATENCY_TIMER_CACHE_LINE 0x00C - -#define MV64340_PCI_SCS_0_BASE_ADDR_LOW 0x010 -#define MV64340_PCI_SCS_0_BASE_ADDR_HIGH 0x014 -#define MV64340_PCI_SCS_1_BASE_ADDR_LOW 0x018 -#define MV64340_PCI_SCS_1_BASE_ADDR_HIGH 0x01C -#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_LOW 0x020 -#define MV64340_PCI_INTERNAL_REG_MEM_MAPPED_BASE_ADDR_HIGH 0x024 -#define MV64340_PCI_SUBSYSTEM_ID_AND_SUBSYSTEM_VENDOR_ID 0x02c -#define MV64340_PCI_EXPANSION_ROM_BASE_ADDR_REG 0x030 -#define MV64340_PCI_CAPABILTY_LIST_POINTER 0x034 -#define MV64340_PCI_INTERRUPT_PIN_AND_LINE 0x03C - /* capability list */ -#define MV64340_PCI_POWER_MANAGEMENT_CAPABILITY 0x040 -#define MV64340_PCI_POWER_MANAGEMENT_STATUS_AND_CONTROL 0x044 -#define MV64340_PCI_VPD_ADDR 0x048 -#define MV64340_PCI_VPD_DATA 0x04c -#define MV64340_PCI_MSI_MESSAGE_CONTROL 0x050 -#define MV64340_PCI_MSI_MESSAGE_ADDR 0x054 -#define MV64340_PCI_MSI_MESSAGE_UPPER_ADDR 0x058 -#define MV64340_PCI_MSI_MESSAGE_DATA 0x05c -#define MV64340_PCI_X_COMMAND 0x060 -#define MV64340_PCI_X_STATUS 0x064 -#define MV64340_PCI_COMPACT_PCI_HOT_SWAP 0x068 - -/***********************************************/ -/* PCI Configuration, Function 1, Registers */ -/***********************************************/ - -#define MV64340_PCI_SCS_2_BASE_ADDR_LOW 0x110 -#define MV64340_PCI_SCS_2_BASE_ADDR_HIGH 0x114 -#define MV64340_PCI_SCS_3_BASE_ADDR_LOW 0x118 -#define MV64340_PCI_SCS_3_BASE_ADDR_HIGH 0x11c -#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_LOW 0x120 -#define MV64340_PCI_INTERNAL_SRAM_BASE_ADDR_HIGH 0x124 - -/***********************************************/ -/* PCI Configuration, Function 2, Registers */ 
-/***********************************************/ - -#define MV64340_PCI_DEVCS_0_BASE_ADDR_LOW 0x210 -#define MV64340_PCI_DEVCS_0_BASE_ADDR_HIGH 0x214 -#define MV64340_PCI_DEVCS_1_BASE_ADDR_LOW 0x218 -#define MV64340_PCI_DEVCS_1_BASE_ADDR_HIGH 0x21c -#define MV64340_PCI_DEVCS_2_BASE_ADDR_LOW 0x220 -#define MV64340_PCI_DEVCS_2_BASE_ADDR_HIGH 0x224 - -/***********************************************/ -/* PCI Configuration, Function 3, Registers */ -/***********************************************/ - -#define MV64340_PCI_DEVCS_3_BASE_ADDR_LOW 0x310 -#define MV64340_PCI_DEVCS_3_BASE_ADDR_HIGH 0x314 -#define MV64340_PCI_BOOT_CS_BASE_ADDR_LOW 0x318 -#define MV64340_PCI_BOOT_CS_BASE_ADDR_HIGH 0x31c -#define MV64340_PCI_CPU_BASE_ADDR_LOW 0x220 -#define MV64340_PCI_CPU_BASE_ADDR_HIGH 0x224 - -/***********************************************/ -/* PCI Configuration, Function 4, Registers */ -/***********************************************/ - -#define MV64340_PCI_P2P_MEM0_BASE_ADDR_LOW 0x410 -#define MV64340_PCI_P2P_MEM0_BASE_ADDR_HIGH 0x414 -#define MV64340_PCI_P2P_MEM1_BASE_ADDR_LOW 0x418 -#define MV64340_PCI_P2P_MEM1_BASE_ADDR_HIGH 0x41c -#define MV64340_PCI_P2P_I_O_BASE_ADDR 0x420 -#define MV64340_PCI_INTERNAL_REGS_I_O_MAPPED_BASE_ADDR 0x424 - -/****************************************/ -/* Messaging Unit Registers (I20) */ -/****************************************/ - -#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_0_SIDE 0x010 -#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_0_SIDE 0x014 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_0_SIDE 0x018 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_0_SIDE 0x01C -#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_0_SIDE 0x020 -#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x024 -#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x028 -#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_0_SIDE 0x02C -#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_0_SIDE 0x030 -#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_0_SIDE 0x034 -#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x040 -#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_0_SIDE 0x044 -#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_0_SIDE 0x050 -#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_0_SIDE 0x054 -#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x060 -#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x064 -#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x068 -#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x06C -#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_0_SIDE 0x070 -#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_0_SIDE 0x074 -#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_0_SIDE 0x0F8 -#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_0_SIDE 0x0FC - -#define MV64340_I2O_INBOUND_MESSAGE_REG0_PCI_1_SIDE 0x090 -#define MV64340_I2O_INBOUND_MESSAGE_REG1_PCI_1_SIDE 0x094 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_PCI_1_SIDE 0x098 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_PCI_1_SIDE 0x09C -#define MV64340_I2O_INBOUND_DOORBELL_REG_PCI_1_SIDE 0x0A0 -#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0A4 -#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0A8 -#define MV64340_I2O_OUTBOUND_DOORBELL_REG_PCI_1_SIDE 0x0AC -#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_PCI_1_SIDE 0x0B0 -#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_PCI_1_SIDE 0x0B4 -#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C0 -#define 
MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_PCI_1_SIDE 0x0C4 -#define MV64340_I2O_QUEUE_CONTROL_REG_PCI_1_SIDE 0x0D0 -#define MV64340_I2O_QUEUE_BASE_ADDR_REG_PCI_1_SIDE 0x0D4 -#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0E0 -#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0E4 -#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x0E8 -#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x0EC -#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_PCI_1_SIDE 0x0F0 -#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_PCI_1_SIDE 0x0F4 -#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_PCI_1_SIDE 0x078 -#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_PCI_1_SIDE 0x07C - -#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C10 -#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C14 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU0_SIDE 0x1C18 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU0_SIDE 0x1C1C -#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU0_SIDE 0x1C20 -#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C24 -#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C28 -#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU0_SIDE 0x1C2C -#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU0_SIDE 0x1C30 -#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU0_SIDE 0x1C34 -#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C40 -#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU0_SIDE 0x1C44 -#define MV64340_I2O_QUEUE_CONTROL_REG_CPU0_SIDE 0x1C50 -#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU0_SIDE 0x1C54 -#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C60 -#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C64 -#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1C68 -#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1C6C -#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU0_SIDE 0x1C70 -#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU0_SIDE 0x1C74 -#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU0_SIDE 0x1CF8 -#define MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU0_SIDE 0x1CFC -#define MV64340_I2O_INBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C90 -#define MV64340_I2O_INBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C94 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG0_CPU1_SIDE 0x1C98 -#define MV64340_I2O_OUTBOUND_MESSAGE_REG1_CPU1_SIDE 0x1C9C -#define MV64340_I2O_INBOUND_DOORBELL_REG_CPU1_SIDE 0x1CA0 -#define MV64340_I2O_INBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CA4 -#define MV64340_I2O_INBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CA8 -#define MV64340_I2O_OUTBOUND_DOORBELL_REG_CPU1_SIDE 0x1CAC -#define MV64340_I2O_OUTBOUND_INTERRUPT_CAUSE_REG_CPU1_SIDE 0x1CB0 -#define MV64340_I2O_OUTBOUND_INTERRUPT_MASK_REG_CPU1_SIDE 0x1CB4 -#define MV64340_I2O_INBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC0 -#define MV64340_I2O_OUTBOUND_QUEUE_PORT_VIRTUAL_REG_CPU1_SIDE 0x1CC4 -#define MV64340_I2O_QUEUE_CONTROL_REG_CPU1_SIDE 0x1CD0 -#define MV64340_I2O_QUEUE_BASE_ADDR_REG_CPU1_SIDE 0x1CD4 -#define MV64340_I2O_INBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CE0 -#define MV64340_I2O_INBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CE4 -#define MV64340_I2O_INBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1CE8 -#define MV64340_I2O_INBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1CEC -#define MV64340_I2O_OUTBOUND_FREE_HEAD_POINTER_REG_CPU1_SIDE 0x1CF0 -#define MV64340_I2O_OUTBOUND_FREE_TAIL_POINTER_REG_CPU1_SIDE 0x1CF4 -#define MV64340_I2O_OUTBOUND_POST_HEAD_POINTER_REG_CPU1_SIDE 0x1C78 -#define 
MV64340_I2O_OUTBOUND_POST_TAIL_POINTER_REG_CPU1_SIDE 0x1C7C - -/****************************************/ -/* Ethernet Unit Registers */ -/****************************************/ - -/*******************************************/ -/* CUNIT Registers */ -/*******************************************/ - - /* Address Decoding Register Map */ - -#define MV64340_CUNIT_BASE_ADDR_REG0 0xf200 -#define MV64340_CUNIT_BASE_ADDR_REG1 0xf208 -#define MV64340_CUNIT_BASE_ADDR_REG2 0xf210 -#define MV64340_CUNIT_BASE_ADDR_REG3 0xf218 -#define MV64340_CUNIT_SIZE0 0xf204 -#define MV64340_CUNIT_SIZE1 0xf20c -#define MV64340_CUNIT_SIZE2 0xf214 -#define MV64340_CUNIT_SIZE3 0xf21c -#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG0 0xf240 -#define MV64340_CUNIT_HIGH_ADDR_REMAP_REG1 0xf244 -#define MV64340_CUNIT_BASE_ADDR_ENABLE_REG 0xf250 -#define MV64340_MPSC0_ACCESS_PROTECTION_REG 0xf254 -#define MV64340_MPSC1_ACCESS_PROTECTION_REG 0xf258 -#define MV64340_CUNIT_INTERNAL_SPACE_BASE_ADDR_REG 0xf25C - - /* Error Report Registers */ - -#define MV64340_CUNIT_INTERRUPT_CAUSE_REG 0xf310 -#define MV64340_CUNIT_INTERRUPT_MASK_REG 0xf314 -#define MV64340_CUNIT_ERROR_ADDR 0xf318 - - /* Cunit Control Registers */ - -#define MV64340_CUNIT_ARBITER_CONTROL_REG 0xf300 -#define MV64340_CUNIT_CONFIG_REG 0xb40c -#define MV64340_CUNIT_CRROSBAR_TIMEOUT_REG 0xf304 - - /* Cunit Debug Registers */ - -#define MV64340_CUNIT_DEBUG_LOW 0xf340 -#define MV64340_CUNIT_DEBUG_HIGH 0xf344 -#define MV64340_CUNIT_MMASK 0xf380 - - /* MPSCs Clocks Routing Registers */ - -#define MV64340_MPSC_ROUTING_REG 0xb400 -#define MV64340_MPSC_RX_CLOCK_ROUTING_REG 0xb404 -#define MV64340_MPSC_TX_CLOCK_ROUTING_REG 0xb408 - - /* MPSCs Interrupts Registers */ - -#define MV64340_MPSC_CAUSE_REG(port) (0xb804 + (port<<3)) -#define MV64340_MPSC_MASK_REG(port) (0xb884 + (port<<3)) - -#define MV64340_MPSC_MAIN_CONFIG_LOW(port) (0x8000 + (port<<12)) -#define MV64340_MPSC_MAIN_CONFIG_HIGH(port) (0x8004 + (port<<12)) -#define MV64340_MPSC_PROTOCOL_CONFIG(port) (0x8008 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG1(port) (0x800c + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG2(port) (0x8010 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG3(port) (0x8014 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG4(port) (0x8018 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG5(port) (0x801c + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG6(port) (0x8020 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG7(port) (0x8024 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG8(port) (0x8028 + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG9(port) (0x802c + (port<<12)) -#define MV64340_MPSC_CHANNEL_REG10(port) (0x8030 + (port<<12)) - - /* MPSC0 Registers */ - - -/***************************************/ -/* SDMA Registers */ -/***************************************/ - -#define MV64340_SDMA_CONFIG_REG(channel) (0x4000 + (channel<<13)) -#define MV64340_SDMA_COMMAND_REG(channel) (0x4008 + (channel<<13)) -#define MV64340_SDMA_CURRENT_RX_DESCRIPTOR_POINTER(channel) (0x4810 + (channel<<13)) -#define MV64340_SDMA_CURRENT_TX_DESCRIPTOR_POINTER(channel) (0x4c10 + (channel<<13)) -#define MV64340_SDMA_FIRST_TX_DESCRIPTOR_POINTER(channel) (0x4c14 + (channel<<13)) - -#define MV64340_SDMA_CAUSE_REG 0xb800 -#define MV64340_SDMA_MASK_REG 0xb880 - -/* BRG Interrupts */ - -#define MV64340_BRG_CONFIG_REG(brg) (0xb200 + (brg<<3)) -#define MV64340_BRG_BAUDE_TUNING_REG(brg) (0xb208 + (brg<<3)) -#define MV64340_BRG_CAUSE_REG 0xb834 -#define MV64340_BRG_MASK_REG 0xb8b4 - -/****************************************/ -/* DMA Channel 
Control */ -/****************************************/ - -#define MV64340_DMA_CHANNEL0_CONTROL 0x840 -#define MV64340_DMA_CHANNEL0_CONTROL_HIGH 0x880 -#define MV64340_DMA_CHANNEL1_CONTROL 0x844 -#define MV64340_DMA_CHANNEL1_CONTROL_HIGH 0x884 -#define MV64340_DMA_CHANNEL2_CONTROL 0x848 -#define MV64340_DMA_CHANNEL2_CONTROL_HIGH 0x888 -#define MV64340_DMA_CHANNEL3_CONTROL 0x84C -#define MV64340_DMA_CHANNEL3_CONTROL_HIGH 0x88C - - -/****************************************/ -/* IDMA Registers */ -/****************************************/ - -#define MV64340_DMA_CHANNEL0_BYTE_COUNT 0x800 -#define MV64340_DMA_CHANNEL1_BYTE_COUNT 0x804 -#define MV64340_DMA_CHANNEL2_BYTE_COUNT 0x808 -#define MV64340_DMA_CHANNEL3_BYTE_COUNT 0x80C -#define MV64340_DMA_CHANNEL0_SOURCE_ADDR 0x810 -#define MV64340_DMA_CHANNEL1_SOURCE_ADDR 0x814 -#define MV64340_DMA_CHANNEL2_SOURCE_ADDR 0x818 -#define MV64340_DMA_CHANNEL3_SOURCE_ADDR 0x81c -#define MV64340_DMA_CHANNEL0_DESTINATION_ADDR 0x820 -#define MV64340_DMA_CHANNEL1_DESTINATION_ADDR 0x824 -#define MV64340_DMA_CHANNEL2_DESTINATION_ADDR 0x828 -#define MV64340_DMA_CHANNEL3_DESTINATION_ADDR 0x82C -#define MV64340_DMA_CHANNEL0_NEXT_DESCRIPTOR_POINTER 0x830 -#define MV64340_DMA_CHANNEL1_NEXT_DESCRIPTOR_POINTER 0x834 -#define MV64340_DMA_CHANNEL2_NEXT_DESCRIPTOR_POINTER 0x838 -#define MV64340_DMA_CHANNEL3_NEXT_DESCRIPTOR_POINTER 0x83C -#define MV64340_DMA_CHANNEL0_CURRENT_DESCRIPTOR_POINTER 0x870 -#define MV64340_DMA_CHANNEL1_CURRENT_DESCRIPTOR_POINTER 0x874 -#define MV64340_DMA_CHANNEL2_CURRENT_DESCRIPTOR_POINTER 0x878 -#define MV64340_DMA_CHANNEL3_CURRENT_DESCRIPTOR_POINTER 0x87C - - /* IDMA Address Decoding Base Address Registers */ - -#define MV64340_DMA_BASE_ADDR_REG0 0xa00 -#define MV64340_DMA_BASE_ADDR_REG1 0xa08 -#define MV64340_DMA_BASE_ADDR_REG2 0xa10 -#define MV64340_DMA_BASE_ADDR_REG3 0xa18 -#define MV64340_DMA_BASE_ADDR_REG4 0xa20 -#define MV64340_DMA_BASE_ADDR_REG5 0xa28 -#define MV64340_DMA_BASE_ADDR_REG6 0xa30 -#define MV64340_DMA_BASE_ADDR_REG7 0xa38 - - /* IDMA Address Decoding Size Address Register */ - -#define MV64340_DMA_SIZE_REG0 0xa04 -#define MV64340_DMA_SIZE_REG1 0xa0c -#define MV64340_DMA_SIZE_REG2 0xa14 -#define MV64340_DMA_SIZE_REG3 0xa1c -#define MV64340_DMA_SIZE_REG4 0xa24 -#define MV64340_DMA_SIZE_REG5 0xa2c -#define MV64340_DMA_SIZE_REG6 0xa34 -#define MV64340_DMA_SIZE_REG7 0xa3C - - /* IDMA Address Decoding High Address Remap and Access - Protection Registers */ - -#define MV64340_DMA_HIGH_ADDR_REMAP_REG0 0xa60 -#define MV64340_DMA_HIGH_ADDR_REMAP_REG1 0xa64 -#define MV64340_DMA_HIGH_ADDR_REMAP_REG2 0xa68 -#define MV64340_DMA_HIGH_ADDR_REMAP_REG3 0xa6C -#define MV64340_DMA_BASE_ADDR_ENABLE_REG 0xa80 -#define MV64340_DMA_CHANNEL0_ACCESS_PROTECTION_REG 0xa70 -#define MV64340_DMA_CHANNEL1_ACCESS_PROTECTION_REG 0xa74 -#define MV64340_DMA_CHANNEL2_ACCESS_PROTECTION_REG 0xa78 -#define MV64340_DMA_CHANNEL3_ACCESS_PROTECTION_REG 0xa7c -#define MV64340_DMA_ARBITER_CONTROL 0x860 -#define MV64340_DMA_CROSS_BAR_TIMEOUT 0x8d0 - - /* IDMA Headers Retarget Registers */ - -#define MV64340_DMA_HEADERS_RETARGET_CONTROL 0xa84 -#define MV64340_DMA_HEADERS_RETARGET_BASE 0xa88 - - /* IDMA Interrupt Register */ - -#define MV64340_DMA_INTERRUPT_CAUSE_REG 0x8c0 -#define MV64340_DMA_INTERRUPT_CAUSE_MASK 0x8c4 -#define MV64340_DMA_ERROR_ADDR 0x8c8 -#define MV64340_DMA_ERROR_SELECT 0x8cc - - /* IDMA Debug Register ( for internal use ) */ - -#define MV64340_DMA_DEBUG_LOW 0x8e0 -#define MV64340_DMA_DEBUG_HIGH 0x8e4 -#define MV64340_DMA_SPARE 0xA8C - 
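The per-unit macros removed above follow a simple stride scheme: MV64340_SDMA_CONFIG_REG(channel) expands to 0x4000 + (channel << 13), so each SDMA channel owns an 8 KiB register window, and MV64340_MPSC_MAIN_CONFIG_LOW(port) likewise steps in 4 KiB units per port. As a rough sketch of how such offset macros were typically consumed (not taken from this patch; the accessor names are made up and an ioremap()ed register base is assumed):

#include <linux/io.h>

/* Illustrative only: MV64340_SDMA_CONFIG_REG() is one of the macros above. */
static inline u32 mv64340_sdma_read_config(void __iomem *base, int channel)
{
        /* 0x4000 + (channel << 13): one 8 KiB register window per channel */
        return readl(base + MV64340_SDMA_CONFIG_REG(channel));
}

static inline void mv64340_sdma_write_config(void __iomem *base, int channel,
                                             u32 val)
{
        writel(val, base + MV64340_SDMA_CONFIG_REG(channel));
}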
-/****************************************/ -/* Timer_Counter */ -/****************************************/ - -#define MV64340_TIMER_COUNTER0 0x850 -#define MV64340_TIMER_COUNTER1 0x854 -#define MV64340_TIMER_COUNTER2 0x858 -#define MV64340_TIMER_COUNTER3 0x85C -#define MV64340_TIMER_COUNTER_0_3_CONTROL 0x864 -#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_CAUSE 0x868 -#define MV64340_TIMER_COUNTER_0_3_INTERRUPT_MASK 0x86c - -/****************************************/ -/* Watchdog registers */ -/****************************************/ - -#define MV64340_WATCHDOG_CONFIG_REG 0xb410 -#define MV64340_WATCHDOG_VALUE_REG 0xb414 - -/****************************************/ -/* I2C Registers */ -/****************************************/ - -#define MV64XXX_I2C_OFFSET 0xc000 -#define MV64XXX_I2C_REG_BLOCK_SIZE 0x0020 - -/****************************************/ -/* GPP Interface Registers */ -/****************************************/ - -#define MV64340_GPP_IO_CONTROL 0xf100 -#define MV64340_GPP_LEVEL_CONTROL 0xf110 -#define MV64340_GPP_VALUE 0xf104 -#define MV64340_GPP_INTERRUPT_CAUSE 0xf108 -#define MV64340_GPP_INTERRUPT_MASK0 0xf10c -#define MV64340_GPP_INTERRUPT_MASK1 0xf114 -#define MV64340_GPP_VALUE_SET 0xf118 -#define MV64340_GPP_VALUE_CLEAR 0xf11c - -/****************************************/ -/* Interrupt Controller Registers */ -/****************************************/ - -/****************************************/ -/* Interrupts */ -/****************************************/ - -#define MV64340_MAIN_INTERRUPT_CAUSE_LOW 0x004 -#define MV64340_MAIN_INTERRUPT_CAUSE_HIGH 0x00c -#define MV64340_CPU_INTERRUPT0_MASK_LOW 0x014 -#define MV64340_CPU_INTERRUPT0_MASK_HIGH 0x01c -#define MV64340_CPU_INTERRUPT0_SELECT_CAUSE 0x024 -#define MV64340_CPU_INTERRUPT1_MASK_LOW 0x034 -#define MV64340_CPU_INTERRUPT1_MASK_HIGH 0x03c -#define MV64340_CPU_INTERRUPT1_SELECT_CAUSE 0x044 -#define MV64340_INTERRUPT0_MASK_0_LOW 0x054 -#define MV64340_INTERRUPT0_MASK_0_HIGH 0x05c -#define MV64340_INTERRUPT0_SELECT_CAUSE 0x064 -#define MV64340_INTERRUPT1_MASK_0_LOW 0x074 -#define MV64340_INTERRUPT1_MASK_0_HIGH 0x07c -#define MV64340_INTERRUPT1_SELECT_CAUSE 0x084 - -/****************************************/ -/* MPP Interface Registers */ -/****************************************/ - -#define MV64340_MPP_CONTROL0 0xf000 -#define MV64340_MPP_CONTROL1 0xf004 -#define MV64340_MPP_CONTROL2 0xf008 -#define MV64340_MPP_CONTROL3 0xf00c - -/****************************************/ -/* Serial Initialization registers */ -/****************************************/ - -#define MV64340_SERIAL_INIT_LAST_DATA 0xf324 -#define MV64340_SERIAL_INIT_CONTROL 0xf328 -#define MV64340_SERIAL_INIT_STATUS 0xf32c - -extern void mv64340_irq_init(unsigned int base); - -#endif /* __ASM_MV643XX_H */ diff --git a/include/linux/net.h b/include/linux/net.h index 688320b79fcc..b75bc534c1b3 100644 --- a/include/linux/net.h +++ b/include/linux/net.h @@ -322,6 +322,25 @@ static inline bool sendpage_ok(struct page *page) return !PageSlab(page) && page_count(page) >= 1; } +/* + * Check sendpage_ok on contiguous pages. 
+ */ +static inline bool sendpages_ok(struct page *page, size_t len, size_t offset) +{ + struct page *p = page + (offset >> PAGE_SHIFT); + size_t count = 0; + + while (count < len) { + if (!sendpage_ok(p)) + return false; + + p++; + count += PAGE_SIZE; + } + + return true; +} + int kernel_sendmsg(struct socket *sock, struct msghdr *msg, struct kvec *vec, size_t num, size_t len); int kernel_sendmsg_locked(struct sock *sk, struct msghdr *msg, diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 7c2d77d75a88..66e7d26b70a4 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -24,9 +24,8 @@ enum { NETIF_F_HW_VLAN_CTAG_FILTER_BIT,/* Receive filtering on VLAN CTAGs */ NETIF_F_VLAN_CHALLENGED_BIT, /* Device cannot handle VLAN packets */ NETIF_F_GSO_BIT, /* Enable software GSO. */ - NETIF_F_LLTX_BIT, /* LockLess TX - deprecated. Please */ - /* do not use LLTX in new drivers */ - NETIF_F_NETNS_LOCAL_BIT, /* Does not change network namespaces */ + __UNUSED_NETIF_F_12, + __UNUSED_NETIF_F_13, NETIF_F_GRO_BIT, /* Generic receive offload */ NETIF_F_LRO_BIT, /* large receive offload */ @@ -59,7 +58,7 @@ enum { NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */ NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */ - NETIF_F_FCOE_MTU_BIT, /* Supports max FCoE MTU, 2158 bytes*/ + __UNUSED_NETIF_F_37, NETIF_F_NTUPLE_BIT, /* N-tuple filters supported */ NETIF_F_RXHASH_BIT, /* Receive hashing offload */ NETIF_F_RXCSUM_BIT, /* Receive checksumming offload */ @@ -106,7 +105,6 @@ enum { #define __NETIF_F(name) __NETIF_F_BIT(NETIF_F_##name##_BIT) #define NETIF_F_FCOE_CRC __NETIF_F(FCOE_CRC) -#define NETIF_F_FCOE_MTU __NETIF_F(FCOE_MTU) #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) #define NETIF_F_FSO __NETIF_F(FSO) #define NETIF_F_GRO __NETIF_F(GRO) @@ -120,10 +118,8 @@ enum { #define NETIF_F_HW_VLAN_CTAG_TX __NETIF_F(HW_VLAN_CTAG_TX) #define NETIF_F_IP_CSUM __NETIF_F(IP_CSUM) #define NETIF_F_IPV6_CSUM __NETIF_F(IPV6_CSUM) -#define NETIF_F_LLTX __NETIF_F(LLTX) #define NETIF_F_LOOPBACK __NETIF_F(LOOPBACK) #define NETIF_F_LRO __NETIF_F(LRO) -#define NETIF_F_NETNS_LOCAL __NETIF_F(NETNS_LOCAL) #define NETIF_F_NOCACHE_COPY __NETIF_F(NOCACHE_COPY) #define NETIF_F_NTUPLE __NETIF_F(NTUPLE) #define NETIF_F_RXCSUM __NETIF_F(RXCSUM) @@ -192,8 +188,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) /* Features valid for ethtool to change */ /* = all defined minus driver/device-class-related */ -#define NETIF_F_NEVER_CHANGE (NETIF_F_VLAN_CHALLENGED | \ - NETIF_F_LLTX | NETIF_F_NETNS_LOCAL) +#define NETIF_F_NEVER_CHANGE NETIF_F_VLAN_CHALLENGED /* remember that ((t)1 << t_BITS) is undefined in C99 */ #define NETIF_F_ETHTOOL_BITS ((__NETIF_F_BIT(NETDEV_FEATURE_COUNT - 1) | \ @@ -214,9 +209,6 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) #define NETIF_F_ALL_TSO (NETIF_F_TSO | NETIF_F_TSO6 | \ NETIF_F_TSO_ECN | NETIF_F_TSO_MANGLEID) -#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ - NETIF_F_FSO) - /* List of features with software fallbacks. 
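The sendpages_ok() helper added to linux/net.h just above walks every page backing a contiguous [offset, offset + len) range and returns false as soon as one of them fails sendpage_ok() (i.e. is a slab page or has a zero refcount). A hedged example of the kind of caller that benefits, loosely modelled on how storage/network senders decide whether to request zero-copy transmission (the function name is hypothetical):

#include <linux/net.h>
#include <linux/socket.h>

/*
 * Illustrative only: request page splicing for a send only when every
 * backing page may safely be referenced by the network stack.
 */
static void example_prep_msg_flags(struct msghdr *msg, struct page *page,
                                   size_t offset, size_t len)
{
        msg->msg_flags = MSG_DONTWAIT;

        if (sendpages_ok(page, len, offset))
                msg->msg_flags |= MSG_SPLICE_PAGES;
}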
*/ #define NETIF_F_GSO_SOFTWARE (NETIF_F_ALL_TSO | NETIF_F_GSO_SCTP | \ NETIF_F_GSO_UDP_L4 | NETIF_F_GSO_FRAGLIST) diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 607009150b5f..e87b5e488325 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -40,7 +40,6 @@ #include <net/dcbnl.h> #endif #include <net/netprio_cgroup.h> - #include <linux/netdev_features.h> #include <linux/neighbour.h> #include <linux/netdevice_xmit.h> @@ -81,6 +80,7 @@ struct xdp_frame; struct xdp_metadata_ops; struct xdp_md; struct ethtool_netdev_state; +struct phy_link_topology; typedef u32 xdp_features_t; @@ -356,7 +356,7 @@ struct napi_struct { unsigned long state; int weight; - int defer_hard_irqs_count; + u32 defer_hard_irqs_count; unsigned long gro_bitmask; int (*poll)(struct napi_struct *, int); #ifdef CONFIG_NETPOLL @@ -645,9 +645,6 @@ struct netdev_queue { #ifdef CONFIG_SYSFS struct kobject kobj; #endif -#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) - int numa_node; -#endif unsigned long tx_maxrate; /* * Number of TX timeouts for this queue @@ -660,13 +657,13 @@ struct netdev_queue { #ifdef CONFIG_XDP_SOCKETS struct xsk_buff_pool *pool; #endif - /* NAPI instance for the queue - * Readers and writers must hold RTNL - */ - struct napi_struct *napi; + /* * write-mostly part */ +#ifdef CONFIG_BQL + struct dql dql; +#endif spinlock_t _xmit_lock ____cacheline_aligned_in_smp; int xmit_lock_owner; /* @@ -676,8 +673,16 @@ struct netdev_queue { unsigned long state; -#ifdef CONFIG_BQL - struct dql dql; +/* + * slow- / control-path part + */ + /* NAPI instance for the queue + * Readers and writers must hold RTNL + */ + struct napi_struct *napi; + +#if defined(CONFIG_XPS) && defined(CONFIG_NUMA) + int numa_node; #endif } ____cacheline_aligned_in_smp; @@ -1232,7 +1237,7 @@ struct netdev_net_notifier { * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], * struct net_device *dev, * const unsigned char *addr, u16 vid) - * Deletes the FDB entry from dev coresponding to addr. + * Deletes the FDB entry from dev corresponding to addr. * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev, * struct netlink_ext_ack *extack); * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb, @@ -1608,7 +1613,8 @@ struct net_device_ops { * userspace; this means that the order of these flags can change * during any kernel release. * - * You should have a pretty good reason to be extending these flags. + * You should add bitfield booleans after either net_device::priv_flags + * (hotpath) or ::threaded (slowpath) instead of extending these flags. * * @IFF_802_1Q_VLAN: 802.1Q VLAN device * @IFF_EBRIDGE: Ethernet bridging device @@ -1647,10 +1653,6 @@ struct net_device_ops { * @IFF_NO_ADDRCONF: prevent ipv6 addrconf * @IFF_TX_SKB_NO_LINEAR: device/driver is capable of xmitting frames with * skb_headlen(skb) == 0 (data starts from frag0) - * @IFF_CHANGE_PROTO_DOWN: device supports setting carrier via IFLA_PROTO_DOWN - * @IFF_SEE_ALL_HWTSTAMP_REQUESTS: device wants to see calls to - * ndo_hwtstamp_set() for all timestamp requests regardless of source, - * even if those aren't HWTSTAMP_SOURCE_NETDEV. 
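With NETIF_F_LLTX_BIT retired from netdev_features.h above, lockless TX is no longer a feature flag; later in this diff it reappears as a one-bit @lltx member of struct net_device, and the HARD_TX_LOCK()/HARD_TX_TRYLOCK()/HARD_TX_UNLOCK() macros are switched over to test it. A minimal sketch of the driver-side change, assuming a typical setup callback in a logical (tunnel- or bonding-style) driver (the function name is illustrative):

#include <linux/netdevice.h>

static void example_setup(struct net_device *dev)
{
        /* Previously: dev->features |= NETIF_F_LLTX; */

        /* Now: lockless TX is a private boolean carried by the netdev */
        dev->lltx = true;
}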
*/ enum netdev_priv_flags { IFF_802_1Q_VLAN = 1<<0, @@ -1685,42 +1687,8 @@ enum netdev_priv_flags { IFF_L3MDEV_RX_HANDLER = 1<<29, IFF_NO_ADDRCONF = BIT_ULL(30), IFF_TX_SKB_NO_LINEAR = BIT_ULL(31), - IFF_CHANGE_PROTO_DOWN = BIT_ULL(32), - IFF_SEE_ALL_HWTSTAMP_REQUESTS = BIT_ULL(33), }; -#define IFF_802_1Q_VLAN IFF_802_1Q_VLAN -#define IFF_EBRIDGE IFF_EBRIDGE -#define IFF_BONDING IFF_BONDING -#define IFF_ISATAP IFF_ISATAP -#define IFF_WAN_HDLC IFF_WAN_HDLC -#define IFF_XMIT_DST_RELEASE IFF_XMIT_DST_RELEASE -#define IFF_DONT_BRIDGE IFF_DONT_BRIDGE -#define IFF_DISABLE_NETPOLL IFF_DISABLE_NETPOLL -#define IFF_MACVLAN_PORT IFF_MACVLAN_PORT -#define IFF_BRIDGE_PORT IFF_BRIDGE_PORT -#define IFF_OVS_DATAPATH IFF_OVS_DATAPATH -#define IFF_TX_SKB_SHARING IFF_TX_SKB_SHARING -#define IFF_UNICAST_FLT IFF_UNICAST_FLT -#define IFF_TEAM_PORT IFF_TEAM_PORT -#define IFF_SUPP_NOFCS IFF_SUPP_NOFCS -#define IFF_LIVE_ADDR_CHANGE IFF_LIVE_ADDR_CHANGE -#define IFF_MACVLAN IFF_MACVLAN -#define IFF_XMIT_DST_RELEASE_PERM IFF_XMIT_DST_RELEASE_PERM -#define IFF_L3MDEV_MASTER IFF_L3MDEV_MASTER -#define IFF_NO_QUEUE IFF_NO_QUEUE -#define IFF_OPENVSWITCH IFF_OPENVSWITCH -#define IFF_L3MDEV_SLAVE IFF_L3MDEV_SLAVE -#define IFF_TEAM IFF_TEAM -#define IFF_RXFH_CONFIGURED IFF_RXFH_CONFIGURED -#define IFF_PHONY_HEADROOM IFF_PHONY_HEADROOM -#define IFF_MACSEC IFF_MACSEC -#define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER -#define IFF_FAILOVER IFF_FAILOVER -#define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE -#define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER -#define IFF_TX_SKB_NO_LINEAR IFF_TX_SKB_NO_LINEAR - /* Specifies the type of the struct net_device::ml_priv pointer */ enum netdev_ml_priv_type { ML_PRIV_NONE, @@ -1750,6 +1718,12 @@ enum netdev_reg_state { * data with strictly "high-level" data, and it has to know about * almost every data structure used in the INET module. * + * @priv_flags: flags invisible to userspace defined as bits, see + * enum netdev_priv_flags for the definitions + * @lltx: device supports lockless Tx. Deprecated for real HW + * drivers. Mainly used by logical interfaces, such as + * bonding and tunnels + * * @name: This is the first field of the "visible" part of this structure * (i.e. as seen by users in the "Space.c" file). It is the name * of the interface. @@ -1816,8 +1790,6 @@ enum netdev_reg_state { * * @flags: Interface flags (a la BSD) * @xdp_features: XDP capability supported by the device - * @priv_flags: Like 'flags' but invisible to userspace, - * see if.h for the definitions * @gflags: Global flags ( kept as legacy ) * @priv_len: Size of the ->priv flexible array * @priv: Flexible array containing private data @@ -1978,6 +1950,7 @@ enum netdev_reg_state { * @fcoe_ddp_xid: Max exchange id for FCoE LRO by ddp * * @priomap: XXX: need comments on this one + * @link_topo: Physical link topology tracking attached PHYs * @phydev: Physical device may attach itself * for hardware timestamping * @sfp_bus: attached &struct sfp_bus structure. 
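The two highest priv_flags bits (IFF_CHANGE_PROTO_DOWN and IFF_SEE_ALL_HWTSTAMP_REQUESTS) are dropped here, and together with the retired NETIF_F_NETNS_LOCAL and NETIF_F_FCOE_MTU features their state moves into the dedicated slow-path booleans added to struct net_device further down (change_proto_down, see_all_hwtstamp_requests, netns_local, fcoe_mtu). A hedged illustration of how a driver's flag manipulation translates (the function is hypothetical):

#include <linux/netdevice.h>

static void example_mark_device(struct net_device *dev)
{
        /* Previously: dev->priv_flags |= IFF_CHANGE_PROTO_DOWN;  */
        /* Previously: dev->features   |= NETIF_F_NETNS_LOCAL;    */

        /* Now: dedicated one-bit members on struct net_device */
        dev->change_proto_down = true;
        dev->netns_local = true;
}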
@@ -1990,6 +1963,14 @@ enum netdev_reg_state { * * @threaded: napi threaded mode is enabled * + * @see_all_hwtstamp_requests: device wants to see calls to + * ndo_hwtstamp_set() for all timestamp requests + * regardless of source, even if those aren't + * HWTSTAMP_SOURCE_NETDEV + * @change_proto_down: device supports setting carrier via IFLA_PROTO_DOWN + * @netns_local: interface can't change network namespaces + * @fcoe_mtu: device supports maximum FCoE MTU, 2158 bytes + * * @net_notifier_list: List of per-net netdev notifier block * that follow this device when it is moved * to another network namespace. @@ -2040,7 +2021,10 @@ struct net_device { /* TX read-mostly hotpath */ __cacheline_group_begin(net_device_read_tx); - unsigned long long priv_flags; + struct_group(priv_flags_fast, + unsigned long priv_flags:32; + unsigned long lltx:1; + ); const struct net_device_ops *netdev_ops; const struct header_ops *header_ops; struct netdev_queue *_tx; @@ -2091,7 +2075,7 @@ struct net_device { unsigned int real_num_rx_queues; struct netdev_rx_queue *_rx; unsigned long gro_flush_timeout; - int napi_defer_hard_irqs; + u32 napi_defer_hard_irqs; unsigned int gro_max_size; unsigned int gro_ipv4_max_size; rx_handler_func_t __rcu *rx_handler; @@ -2369,12 +2353,19 @@ struct net_device { #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO) struct netprio_map __rcu *priomap; #endif + struct phy_link_topology *link_topo; struct phy_device *phydev; struct sfp_bus *sfp_bus; struct lock_class_key *qdisc_tx_busylock; bool proto_down; bool threaded; + /* priv_flags_slow, ungrouped to save space */ + unsigned long see_all_hwtstamp_requests:1; + unsigned long change_proto_down:1; + unsigned long netns_local:1; + unsigned long fcoe_mtu:1; + struct list_head net_notifier_list; #if IS_ENABLED(CONFIG_MACSEC) @@ -3094,8 +3085,6 @@ void dev_disable_lro(struct net_device *dev); int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb); u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb, struct net_device *sb_dev); -u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb, - struct net_device *sb_dev); int __dev_queue_xmit(struct sk_buff *skb, struct net_device *sb_dev); int __dev_direct_xmit(struct sk_buff *skb, u16 queue_id); @@ -3539,7 +3528,7 @@ static inline void netdev_tx_completed_queue(struct netdev_queue *dev_queue, dql_completed(&dev_queue->dql, bytes); /* - * Without the memory barrier there is a small possiblity that + * Without the memory barrier there is a small possibility that * netdev_tx_sent_queue will miss the update and cause the queue to * be stopped forever */ @@ -3578,6 +3567,17 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q) } /** + * netdev_tx_reset_subqueue - reset the BQL stats and state of a netdev queue + * @dev: network device + * @qid: stack index of the queue to reset + */ +static inline void netdev_tx_reset_subqueue(const struct net_device *dev, + u32 qid) +{ + netdev_tx_reset_queue(netdev_get_tx_queue(dev, qid)); +} + +/** * netdev_reset_queue - reset the packets and bytes count of a network device * @dev_queue: network device * @@ -3586,7 +3586,7 @@ static inline void netdev_tx_reset_queue(struct netdev_queue *q) */ static inline void netdev_reset_queue(struct net_device *dev_queue) { - netdev_tx_reset_queue(netdev_get_tx_queue(dev_queue, 0)); + netdev_tx_reset_subqueue(dev_queue, 0); } /** @@ -3950,8 +3950,11 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, int bpf_xdp_link_attach(const union bpf_attr 
*attr, struct bpf_prog *prog); u8 dev_xdp_prog_count(struct net_device *dev); +int dev_xdp_propagate(struct net_device *dev, struct netdev_bpf *bpf); u32 dev_xdp_prog_id(struct net_device *dev, enum bpf_xdp_mode mode); +u32 dev_get_min_mp_channel_count(const struct net_device *dev); + int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); int dev_forward_skb_nomtu(struct net_device *dev, struct sk_buff *skb); @@ -4449,7 +4452,7 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) } #define HARD_TX_LOCK(dev, txq, cpu) { \ - if ((dev->features & NETIF_F_LLTX) == 0) { \ + if (!(dev)->lltx) { \ __netif_tx_lock(txq, cpu); \ } else { \ __netif_tx_acquire(txq); \ @@ -4457,12 +4460,12 @@ static inline void netif_tx_unlock_bh(struct net_device *dev) } #define HARD_TX_TRYLOCK(dev, txq) \ - (((dev->features & NETIF_F_LLTX) == 0) ? \ + (!(dev)->lltx ? \ __netif_tx_trylock(txq) : \ __netif_tx_acquire(txq)) #define HARD_TX_UNLOCK(dev, txq) { \ - if ((dev->features & NETIF_F_LLTX) == 0) { \ + if (!(dev)->lltx) { \ __netif_tx_unlock(txq); \ } else { \ __netif_tx_release(txq); \ @@ -4607,7 +4610,7 @@ void dev_uc_flush(struct net_device *dev); void dev_uc_init(struct net_device *dev); /** - * __dev_uc_sync - Synchonize device's unicast list + * __dev_uc_sync - Synchronize device's unicast list * @dev: device to sync * @sync: function to call if address should be added * @unsync: function to call if address should be removed @@ -4651,7 +4654,7 @@ void dev_mc_flush(struct net_device *dev); void dev_mc_init(struct net_device *dev); /** - * __dev_mc_sync - Synchonize device's multicast list + * __dev_mc_sync - Synchronize device's multicast list * @dev: device to sync * @sync: function to call if address should be added * @unsync: function to call if address should be removed diff --git a/include/linux/netfs.h b/include/linux/netfs.h index c47443e7a97e..5eaceef41e6c 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -38,11 +38,8 @@ static inline void folio_start_private_2(struct folio *folio) folio_set_private_2(folio); } -/* Marks used on xarray-based buffers */ -#define NETFS_BUF_PUT_MARK XA_MARK_0 /* - Page needs putting */ -#define NETFS_BUF_PAGECACHE_MARK XA_MARK_1 /* - Page needs wb/dirty flag wrangling */ - enum netfs_io_source { + NETFS_SOURCE_UNKNOWN, NETFS_FILL_WITH_ZEROES, NETFS_DOWNLOAD_FROM_SERVER, NETFS_READ_FROM_CACHE, @@ -73,6 +70,7 @@ struct netfs_inode { #define NETFS_ICTX_ODIRECT 0 /* The file has DIO in progress */ #define NETFS_ICTX_UNBUFFERED 1 /* I/O should not use the pagecache */ #define NETFS_ICTX_WRITETHROUGH 2 /* Write-through caching */ +#define NETFS_ICTX_MODIFIED_ATTR 3 /* Indicate change in mtime/ctime */ }; /* @@ -133,9 +131,11 @@ static inline struct netfs_group *netfs_folio_group(struct folio *folio) struct netfs_io_stream { /* Submission tracking */ struct netfs_io_subrequest *construct; /* Op being constructed */ + size_t sreq_max_len; /* Maximum size of a subrequest */ + unsigned int sreq_max_segs; /* 0 or max number of segments in an iterator */ unsigned int submit_off; /* Folio offset we're submitting from */ unsigned int submit_len; /* Amount of data left to submit */ - unsigned int submit_max_len; /* Amount I/O can be rounded up to */ + unsigned int submit_extendable_to; /* Amount I/O can be rounded up to */ void (*prepare_write)(struct netfs_io_subrequest *subreq); void (*issue_write)(struct netfs_io_subrequest *subreq); /* Collection tracking */ @@ -176,41 +176,45 
@@ struct netfs_io_subrequest { struct list_head rreq_link; /* Link in rreq->subrequests */ struct iov_iter io_iter; /* Iterator for this subrequest */ unsigned long long start; /* Where to start the I/O */ - size_t max_len; /* Maximum size of the I/O */ size_t len; /* Size of the I/O */ size_t transferred; /* Amount of data transferred */ + size_t consumed; /* Amount of read data consumed */ + size_t prev_donated; /* Amount of data donated from previous subreq */ + size_t next_donated; /* Amount of data donated from next subreq */ refcount_t ref; short error; /* 0 or error that occurred */ unsigned short debug_index; /* Index in list (for debugging output) */ unsigned int nr_segs; /* Number of segs in io_iter */ - unsigned int max_nr_segs; /* 0 or max number of segments in an iterator */ enum netfs_io_source source; /* Where to read from/write to */ unsigned char stream_nr; /* I/O stream this belongs to */ + unsigned char curr_folioq_slot; /* Folio currently being read */ + unsigned char curr_folio_order; /* Order of folio */ + struct folio_queue *curr_folioq; /* Queue segment in which current folio resides */ unsigned long flags; #define NETFS_SREQ_COPY_TO_CACHE 0 /* Set if should copy the data to the cache */ #define NETFS_SREQ_CLEAR_TAIL 1 /* Set if the rest of the read should be cleared */ -#define NETFS_SREQ_SHORT_IO 2 /* Set if the I/O was short */ #define NETFS_SREQ_SEEK_DATA_READ 3 /* Set if ->read() should SEEK_DATA first */ #define NETFS_SREQ_NO_PROGRESS 4 /* Set if we didn't manage to read any data */ #define NETFS_SREQ_ONDEMAND 5 /* Set if it's from on-demand read mode */ #define NETFS_SREQ_BOUNDARY 6 /* Set if ends on hard boundary (eg. ceph object) */ +#define NETFS_SREQ_HIT_EOF 7 /* Set if short due to EOF */ #define NETFS_SREQ_IN_PROGRESS 8 /* Unlocked when the subrequest completes */ #define NETFS_SREQ_NEED_RETRY 9 /* Set if the filesystem requests a retry */ #define NETFS_SREQ_RETRYING 10 /* Set if we're retrying */ #define NETFS_SREQ_FAILED 11 /* Set if the subreq failed unretryably */ -#define NETFS_SREQ_HIT_EOF 12 /* Set if we hit the EOF */ }; enum netfs_io_origin { NETFS_READAHEAD, /* This read was triggered by readahead */ NETFS_READPAGE, /* This read is a synchronous read */ + NETFS_READ_GAPS, /* This read is a synchronous read to fill gaps */ NETFS_READ_FOR_WRITE, /* This read is to prepare a write */ - NETFS_COPY_TO_CACHE, /* This write is to copy a read to the cache */ + NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_WRITEBACK, /* This write was triggered by writepages */ NETFS_WRITETHROUGH, /* This write was made by netfs_perform_write() */ NETFS_UNBUFFERED_WRITE, /* This is an unbuffered write */ - NETFS_DIO_READ, /* This is a direct I/O read */ NETFS_DIO_WRITE, /* This is a direct I/O write */ + NETFS_PGPRIV2_COPY_TO_CACHE, /* [DEPRECATED] This is writing read data to the cache */ nr__netfs_io_origin } __mode(byte); @@ -227,11 +231,14 @@ struct netfs_io_request { struct address_space *mapping; /* The mapping being accessed */ struct kiocb *iocb; /* AIO completion vector */ struct netfs_cache_resources cache_resources; + struct readahead_control *ractl; /* Readahead descriptor */ struct list_head proc_link; /* Link in netfs_iorequests */ struct list_head subrequests; /* Contributory I/O operations */ struct netfs_io_stream io_streams[2]; /* Streams of parallel I/O operations */ #define NR_IO_STREAMS 2 //wreq->nr_io_streams struct netfs_group *group; /* Writeback group being written back */ + struct folio_queue *buffer; /* Head of I/O buffer */ + struct 
folio_queue *buffer_tail; /* Tail of I/O buffer */ struct iov_iter iter; /* Unencrypted-side iterator */ struct iov_iter io_iter; /* I/O (Encrypted-side) iterator */ void *netfs_priv; /* Private data for the netfs */ @@ -245,24 +252,23 @@ struct netfs_io_request { unsigned int nr_group_rel; /* Number of refs to release on ->group */ spinlock_t lock; /* Lock for queuing subreqs */ atomic_t nr_outstanding; /* Number of ops in progress */ - atomic_t nr_copy_ops; /* Number of copy-to-cache ops in progress */ - size_t upper_len; /* Length can be extended to here */ unsigned long long submitted; /* Amount submitted for I/O so far */ unsigned long long len; /* Length of the request */ size_t transferred; /* Amount to be indicated as transferred */ - short error; /* 0 or error that occurred */ + long error; /* 0 or error that occurred */ enum netfs_io_origin origin; /* Origin of the request */ bool direct_bv_unpin; /* T if direct_bv[] must be unpinned */ + u8 buffer_head_slot; /* First slot in ->buffer */ + u8 buffer_tail_slot; /* Next slot in ->buffer_tail */ unsigned long long i_size; /* Size of the file */ unsigned long long start; /* Start position */ atomic64_t issued_to; /* Write issuer folio cursor */ - unsigned long long contiguity; /* Tracking for gaps in the writeback sequence */ unsigned long long collected_to; /* Point we've collected to */ unsigned long long cleaned_to; /* Position we've cleaned folios to */ pgoff_t no_unlock_folio; /* Don't unlock this folio after read */ + size_t prev_donated; /* Fallback for subreq->prev_donated */ refcount_t ref; unsigned long flags; -#define NETFS_RREQ_INCOMPLETE_IO 0 /* Some ioreqs terminated short or with error */ #define NETFS_RREQ_COPY_TO_CACHE 1 /* Need to write to the cache */ #define NETFS_RREQ_NO_UNLOCK_FOLIO 2 /* Don't unlock no_unlock_folio on completion */ #define NETFS_RREQ_DONT_UNLOCK_FOLIOS 3 /* Don't unlock the folios on completion */ @@ -274,6 +280,7 @@ struct netfs_io_request { #define NETFS_RREQ_PAUSE 11 /* Pause subrequest generation */ #define NETFS_RREQ_USE_IO_ITER 12 /* Use ->io_iter rather than ->i_pages */ #define NETFS_RREQ_ALL_QUEUED 13 /* All subreqs are now queued */ +#define NETFS_RREQ_NEED_RETRY 14 /* Need to try retrying */ #define NETFS_RREQ_USE_PGPRIV2 31 /* [DEPRECATED] Use PG_private_2 to mark * write to cache on read */ const struct netfs_request_ops *netfs_ops; @@ -292,7 +299,7 @@ struct netfs_request_ops { /* Read request handling */ void (*expand_readahead)(struct netfs_io_request *rreq); - bool (*clamp_length)(struct netfs_io_subrequest *subreq); + int (*prepare_read)(struct netfs_io_subrequest *subreq); void (*issue_read)(struct netfs_io_subrequest *subreq); bool (*is_still_valid)(struct netfs_io_request *rreq); int (*check_write_begin)(struct file *file, loff_t pos, unsigned len, @@ -422,7 +429,10 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp); vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group); /* (Sub)request management API. 
*/ -void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool); +void netfs_read_subreq_progress(struct netfs_io_subrequest *subreq, + bool was_async); +void netfs_read_subreq_terminated(struct netfs_io_subrequest *subreq, + int error, bool was_async); void netfs_get_subrequest(struct netfs_io_subrequest *subreq, enum netfs_sreq_ref_trace what); void netfs_put_subrequest(struct netfs_io_subrequest *subreq, diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index bd19c4b91e31..cd4e28db0cbd 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h @@ -64,6 +64,7 @@ int netpoll_setup(struct netpoll *np); void __netpoll_cleanup(struct netpoll *np); void __netpoll_free(struct netpoll *np); void netpoll_cleanup(struct netpoll *np); +void do_netpoll_cleanup(struct netpoll *np); netdev_tx_t netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); #ifdef CONFIG_NETPOLL diff --git a/include/linux/nfs.h b/include/linux/nfs.h index ceb70a926b95..9ad727ddfedb 100644 --- a/include/linux/nfs.h +++ b/include/linux/nfs.h @@ -8,11 +8,20 @@ #ifndef _LINUX_NFS_H #define _LINUX_NFS_H +#include <linux/cred.h> +#include <linux/sunrpc/auth.h> #include <linux/sunrpc/msg_prot.h> #include <linux/string.h> #include <linux/crc32.h> #include <uapi/linux/nfs.h> +/* The LOCALIO program is entirely private to Linux and is + * NOT part of the uapi. + */ +#define NFS_LOCALIO_PROGRAM 400122 +#define LOCALIOPROC_NULL 0 +#define LOCALIOPROC_UUID_IS_LOCAL 1 + /* * This is the kernel NFS client file handle representation */ diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index f9df88091c6d..8d7430d9f218 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h @@ -281,15 +281,18 @@ enum nfsstat4 { /* nfs42 */ NFS4ERR_PARTNER_NOTSUPP = 10088, NFS4ERR_PARTNER_NO_AUTH = 10089, - NFS4ERR_UNION_NOTSUPP = 10090, - NFS4ERR_OFFLOAD_DENIED = 10091, - NFS4ERR_WRONG_LFS = 10092, - NFS4ERR_BADLABEL = 10093, - NFS4ERR_OFFLOAD_NO_REQS = 10094, + NFS4ERR_UNION_NOTSUPP = 10090, + NFS4ERR_OFFLOAD_DENIED = 10091, + NFS4ERR_WRONG_LFS = 10092, + NFS4ERR_BADLABEL = 10093, + NFS4ERR_OFFLOAD_NO_REQS = 10094, /* xattr (RFC8276) */ - NFS4ERR_NOXATTR = 10095, - NFS4ERR_XATTR2BIG = 10096, + NFS4ERR_NOXATTR = 10095, + NFS4ERR_XATTR2BIG = 10096, + + /* can be used for internal errors */ + NFS4ERR_FIRST_FREE }; /* error codes for internal client use */ diff --git a/include/linux/nfs_common.h b/include/linux/nfs_common.h new file mode 100644 index 000000000000..5fc02df88252 --- /dev/null +++ b/include/linux/nfs_common.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This file contains constants and methods used by both NFS client and server. + */ +#ifndef _LINUX_NFS_COMMON_H +#define _LINUX_NFS_COMMON_H + +#include <linux/errno.h> +#include <uapi/linux/nfs.h> + +/* Mapping from NFS error code to "errno" error code. 
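The new linux/nfs_common.h header collects status-to-errno translation shared by the NFS client and server; the prototypes appear in the next hunk (nfs_stat_to_errno() for NFSv2/v3 status values, nfs4_stat_to_errno() for NFSv4). A small sketch of a decode path using it (the surrounding function is invented for illustration; nfs_common.h already pulls in the uapi nfs_stat definitions):

#include <linux/nfs_common.h>

/* Illustrative only: translate an on-the-wire NFS status into a local errno. */
static int example_decode_status(enum nfs_stat status)
{
        if (status != NFS_OK)
                return nfs_stat_to_errno(status);
        return 0;
}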
*/ +#define errno_NFSERR_IO EIO + +int nfs_stat_to_errno(enum nfs_stat status); +int nfs4_stat_to_errno(int stat); + +#endif /* _LINUX_NFS_COMMON_H */ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 1df86ab98c77..853df3fcd4c2 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -8,6 +8,7 @@ #include <linux/wait.h> #include <linux/nfs_xdr.h> #include <linux/sunrpc/xprt.h> +#include <linux/nfslocalio.h> #include <linux/atomic.h> #include <linux/refcount.h> @@ -49,6 +50,7 @@ struct nfs_client { #define NFS_CS_DS 7 /* - Server is a DS */ #define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */ #define NFS_CS_PNFS 9 /* - Server used for pnfs */ +#define NFS_CS_LOCAL_IO 10 /* - client is local */ struct sockaddr_storage cl_addr; /* server identifier */ size_t cl_addrlen; char * cl_hostname; /* hostname of server */ @@ -125,6 +127,13 @@ struct nfs_client { struct net *cl_net; struct list_head pending_cb_stateids; struct rcu_head rcu; + +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + struct timespec64 cl_nfssvc_boot; + seqlock_t cl_boot_lock; + nfs_uuid_t cl_uuid; + spinlock_t cl_localio_lock; +#endif /* CONFIG_NFS_LOCALIO */ }; /* @@ -158,6 +167,7 @@ struct nfs_server { #define NFS_MOUNT_WRITE_WAIT 0x02000000 #define NFS_MOUNT_TRUNK_DISCOVERY 0x04000000 #define NFS_MOUNT_SHUTDOWN 0x08000000 +#define NFS_MOUNT_NO_ALIGNWRITE 0x10000000 unsigned int fattr_valid; /* Valid attributes */ unsigned int caps; /* server capabilities */ @@ -234,8 +244,7 @@ struct nfs_server { /* the following fields are protected by nfs_client->cl_lock */ struct rb_root state_owners; #endif - struct ida openowner_id; - struct ida lockowner_id; + atomic64_t owner_ctr; struct list_head state_owners_lru; struct list_head layouts; struct list_head delegations; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 45623af3e7b8..12d8e47bc5a3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -446,7 +446,7 @@ struct nfs42_clone_res { struct stateowner_id { __u64 create_time; - __u32 uniquifier; + __u64 uniquifier; }; struct nfs4_open_delegation { @@ -1854,6 +1854,24 @@ struct nfs_rpc_ops { }; /* + * Helper functions used by NFS client and/or server + */ +static inline void encode_opaque_fixed(struct xdr_stream *xdr, + const void *buf, size_t len) +{ + WARN_ON_ONCE(xdr_stream_encode_opaque_fixed(xdr, buf, len) < 0); +} + +static inline int decode_opaque_fixed(struct xdr_stream *xdr, + void *buf, size_t len) +{ + ssize_t ret = xdr_stream_decode_opaque_fixed(xdr, buf, len); + if (unlikely(ret < 0)) + return -EIO; + return 0; +} + +/* * Function vectors etc. 
for the NFS client */ extern const struct nfs_rpc_ops nfs_v2_clientops; @@ -1866,4 +1884,4 @@ extern const struct rpc_version nfs_version4; extern const struct rpc_version nfsacl_version3; extern const struct rpc_program nfsacl_program; -#endif +#endif /* _LINUX_NFS_XDR_H */ diff --git a/include/linux/nfslocalio.h b/include/linux/nfslocalio.h new file mode 100644 index 000000000000..b353abe00357 --- /dev/null +++ b/include/linux/nfslocalio.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2024 Mike Snitzer <snitzer@hammerspace.com> + * Copyright (C) 2024 NeilBrown <neilb@suse.de> + */ +#ifndef __LINUX_NFSLOCALIO_H +#define __LINUX_NFSLOCALIO_H + +/* nfsd_file structure is purposely kept opaque to NFS client */ +struct nfsd_file; + +#if IS_ENABLED(CONFIG_NFS_LOCALIO) + +#include <linux/module.h> +#include <linux/list.h> +#include <linux/uuid.h> +#include <linux/sunrpc/clnt.h> +#include <linux/sunrpc/svcauth.h> +#include <linux/nfs.h> +#include <net/net_namespace.h> + +/* + * Useful to allow a client to negotiate if localio + * possible with its server. + * + * See Documentation/filesystems/nfs/localio.rst for more detail. + */ +typedef struct { + uuid_t uuid; + struct list_head list; + struct net __rcu *net; /* nfsd's network namespace */ + struct auth_domain *dom; /* auth_domain for localio */ +} nfs_uuid_t; + +void nfs_uuid_begin(nfs_uuid_t *); +void nfs_uuid_end(nfs_uuid_t *); +void nfs_uuid_is_local(const uuid_t *, struct list_head *, + struct net *, struct auth_domain *, struct module *); +void nfs_uuid_invalidate_clients(struct list_head *list); +void nfs_uuid_invalidate_one_client(nfs_uuid_t *nfs_uuid); + +/* localio needs to map filehandle -> struct nfsd_file */ +extern struct nfsd_file * +nfsd_open_local_fh(struct net *, struct auth_domain *, struct rpc_clnt *, + const struct cred *, const struct nfs_fh *, + const fmode_t) __must_hold(rcu); + +struct nfsd_localio_operations { + bool (*nfsd_serv_try_get)(struct net *); + void (*nfsd_serv_put)(struct net *); + struct nfsd_file *(*nfsd_open_local_fh)(struct net *, + struct auth_domain *, + struct rpc_clnt *, + const struct cred *, + const struct nfs_fh *, + const fmode_t); + void (*nfsd_file_put_local)(struct nfsd_file *); + struct file *(*nfsd_file_file)(struct nfsd_file *); +} ____cacheline_aligned; + +extern void nfsd_localio_ops_init(void); +extern const struct nfsd_localio_operations *nfs_to; + +struct nfsd_file *nfs_open_local_fh(nfs_uuid_t *, + struct rpc_clnt *, const struct cred *, + const struct nfs_fh *, const fmode_t); + +#else /* CONFIG_NFS_LOCALIO */ +static inline void nfsd_localio_ops_init(void) +{ +} +#endif /* CONFIG_NFS_LOCALIO */ + +#endif /* __LINUX_NFSLOCALIO_H */ diff --git a/include/linux/numa.h b/include/linux/numa.h index eb19503604fe..3567e40329eb 100644 --- a/include/linux/numa.h +++ b/include/linux/numa.h @@ -30,6 +30,12 @@ static inline bool numa_valid_node(int nid) #ifdef CONFIG_NUMA #include <asm/sparsemem.h> +extern struct pglist_data *node_data[]; +#define NODE_DATA(nid) (node_data[nid]) + +void __init alloc_node_data(int nid); +void __init alloc_offline_node_data(int nid); + /* Generic implementation available */ int numa_nearest_node(int node, unsigned int state); @@ -57,6 +63,8 @@ static inline int phys_to_target_node(u64 start) { return 0; } + +static inline void alloc_offline_node_data(int nid) {} #endif #define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE) diff --git a/include/linux/numa_memblks.h b/include/linux/numa_memblks.h new file mode 
100644 index 000000000000..cfad6ce7e1bd --- /dev/null +++ b/include/linux/numa_memblks.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __NUMA_MEMBLKS_H +#define __NUMA_MEMBLKS_H + +#ifdef CONFIG_NUMA_MEMBLKS +#include <linux/types.h> + +#define NR_NODE_MEMBLKS (MAX_NUMNODES * 2) + +void __init numa_set_distance(int from, int to, int distance); +void __init numa_reset_distance(void); + +struct numa_memblk { + u64 start; + u64 end; + int nid; +}; + +struct numa_meminfo { + int nr_blks; + struct numa_memblk blk[NR_NODE_MEMBLKS]; +}; + +int __init numa_add_memblk(int nodeid, u64 start, u64 end); +void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi); + +int __init numa_cleanup_meminfo(struct numa_meminfo *mi); + +int __init numa_memblks_init(int (*init_func)(void), + bool memblock_force_top_down); + +#ifdef CONFIG_NUMA_EMU +int numa_emu_cmdline(char *str); +void __init numa_emu_update_cpu_to_node(int *emu_nid_to_phys, + unsigned int nr_emu_nids); +u64 __init numa_emu_dma_end(void); +void __init numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt); +#else +static inline void numa_emulation(struct numa_meminfo *numa_meminfo, + int numa_dist_cnt) +{ } +static inline int numa_emu_cmdline(char *str) +{ + return -EINVAL; +} +#endif /* CONFIG_NUMA_EMU */ + +#ifdef CONFIG_NUMA_KEEP_MEMINFO +extern int phys_to_target_node(u64 start); +#define phys_to_target_node phys_to_target_node +extern int memory_add_physaddr_to_nid(u64 start); +#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid +#endif /* CONFIG_NUMA_KEEP_MEMINFO */ + +#endif /* CONFIG_NUMA_MEMBLKS */ + +#endif /* __NUMA_MEMBLKS_H */ diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h index e10333d78dbb..19d2b256180f 100644 --- a/include/linux/nvme-keyring.h +++ b/include/linux/nvme-keyring.h @@ -12,7 +12,7 @@ key_serial_t nvme_tls_psk_default(struct key *keyring, const char *hostnqn, const char *subnqn); key_serial_t nvme_keyring_id(void); - +struct key *nvme_tls_key_lookup(key_serial_t key_id); #else static inline key_serial_t nvme_tls_psk_default(struct key *keyring, @@ -24,5 +24,9 @@ static inline key_serial_t nvme_keyring_id(void) { return 0; } +static inline struct key *nvme_tls_key_lookup(key_serial_t key_id) +{ + return ERR_PTR(-ENOTSUPP); +} #endif /* !CONFIG_NVME_KEYRING */ #endif /* _NVME_KEYRING_H */ diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h index eb2f04d636c8..97c5f00b9aa3 100644 --- a/include/linux/nvme-rdma.h +++ b/include/linux/nvme-rdma.h @@ -25,6 +25,7 @@ enum nvme_rdma_cm_status { NVME_RDMA_CM_NO_RSC = 0x06, NVME_RDMA_CM_INVALID_IRD = 0x07, NVME_RDMA_CM_INVALID_ORD = 0x08, + NVME_RDMA_CM_INVALID_CNTLID = 0x09, }; static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) @@ -46,6 +47,8 @@ static inline const char *nvme_rdma_cm_msg(enum nvme_rdma_cm_status status) return "invalid IRD"; case NVME_RDMA_CM_INVALID_ORD: return "Invalid ORD"; + case NVME_RDMA_CM_INVALID_CNTLID: + return "invalid controller ID"; default: return "unrecognized reason"; } @@ -64,7 +67,8 @@ struct nvme_rdma_cm_req { __le16 qid; __le16 hrqsize; __le16 hsqsize; - u8 rsvd[24]; + __le16 cntlid; + u8 rsvd[22]; }; /** diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 7b2ae2e43544..b58d9405d65e 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -987,8 +987,8 @@ struct nvme_rw_command { __le16 control; __le32 dsmgmt; __le32 reftag; - __le16 apptag; - __le16 appmask; + __le16 lbat; + __le16 lbatm; }; enum { @@ 
-1057,8 +1057,8 @@ struct nvme_write_zeroes_cmd { __le16 control; __le32 dsmgmt; __le32 reftag; - __le16 apptag; - __le16 appmask; + __le16 lbat; + __le16 lbatm; }; enum nvme_zone_mgmt_action { diff --git a/include/linux/oa_tc6.h b/include/linux/oa_tc6.h new file mode 100644 index 000000000000..15f58e3c56c7 --- /dev/null +++ b/include/linux/oa_tc6.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* + * OPEN Alliance 10BASE‑T1x MAC‑PHY Serial Interface framework + * + * Link: https://opensig.org/download/document/OPEN_Alliance_10BASET1x_MAC-PHY_Serial_Interface_V1.1.pdf + * + * Author: Parthiban Veerasooran <parthiban.veerasooran@microchip.com> + */ + +#include <linux/etherdevice.h> +#include <linux/spi/spi.h> + +struct oa_tc6; + +struct oa_tc6 *oa_tc6_init(struct spi_device *spi, struct net_device *netdev); +void oa_tc6_exit(struct oa_tc6 *tc6); +int oa_tc6_write_register(struct oa_tc6 *tc6, u32 address, u32 value); +int oa_tc6_write_registers(struct oa_tc6 *tc6, u32 address, u32 value[], + u8 length); +int oa_tc6_read_register(struct oa_tc6 *tc6, u32 address, u32 *value); +int oa_tc6_read_registers(struct oa_tc6 *tc6, u32 address, u32 value[], + u8 length); +netdev_tx_t oa_tc6_start_xmit(struct oa_tc6 *tc6, struct sk_buff *skb); +int oa_tc6_zero_align_receive_frame_enable(struct oa_tc6 *tc6); diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index 082841908fe7..c9e3843d2dd5 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h @@ -84,13 +84,3 @@ extern void gpmc_read_settings_dt(struct device_node *np, struct gpmc_timings; struct omap_nand_platform_data; struct omap_onenand_platform_data; - -#if IS_ENABLED(CONFIG_MTD_ONENAND_OMAP2) -extern int gpmc_onenand_init(struct omap_onenand_platform_data *d); -#else -#define board_onenand_data NULL -static inline int gpmc_onenand_init(struct omap_onenand_platform_data *d) -{ - return 0; -} -#endif diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 5769fe6e4950..1b3a76710487 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -66,8 +66,6 @@ * PG_referenced, PG_reclaim are used for page reclaim for anonymous and * file-backed pagecache (see mm/vmscan.c). * - * PG_error is set to indicate that an I/O error occurred on this page. - * * PG_arch_1 is an architecture specific page state bit. The generic code * guarantees that this bit is cleared for a page when it first is entered into * the page cache. @@ -103,22 +101,18 @@ enum pageflags { PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */ PG_active, PG_workingset, - PG_error, - PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/ + PG_owner_priv_1, /* Owner use. If pagecache, fs may use */ + PG_owner_2, /* Owner use. If pagecache, fs may use */ PG_arch_1, PG_reserved, PG_private, /* If pagecache, has fs-private data */ PG_private_2, /* If pagecache, has fs aux data */ - PG_mappedtodisk, /* Has blocks allocated on-disk */ PG_reclaim, /* To be reclaimed asap */ PG_swapbacked, /* Page is backed by RAM/swap */ PG_unevictable, /* Page is "unevictable" */ #ifdef CONFIG_MMU PG_mlocked, /* Page is vma mlocked */ #endif -#ifdef CONFIG_ARCH_USES_PG_UNCACHED - PG_uncached, /* Page has been mapped as uncached */ -#endif #ifdef CONFIG_MEMORY_FAILURE PG_hwpoison, /* hardware poisoned page. 
Don't touch */ #endif @@ -126,14 +120,21 @@ enum pageflags { PG_young, PG_idle, #endif -#ifdef CONFIG_ARCH_USES_PG_ARCH_X +#ifdef CONFIG_ARCH_USES_PG_ARCH_2 PG_arch_2, +#endif +#ifdef CONFIG_ARCH_USES_PG_ARCH_3 PG_arch_3, #endif __NR_PAGEFLAGS, PG_readahead = PG_reclaim, + /* Anonymous memory (and shmem) */ + PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ + /* Some filesystems */ + PG_checked = PG_owner_priv_1, + /* * Depending on the way an anonymous folio can be mapped into a page * table (e.g., single PMD/PUD/CONT of the head page vs. PTE-mapped @@ -141,13 +142,13 @@ enum pageflags { * tail pages of an anonymous folio. For now, we only expect it to be * set on tail pages for PTE-mapped THP. */ - PG_anon_exclusive = PG_mappedtodisk, - - /* Filesystems */ - PG_checked = PG_owner_priv_1, + PG_anon_exclusive = PG_owner_2, - /* SwapBacked */ - PG_swapcache = PG_owner_priv_1, /* Swap page: swp_entry_t in private */ + /* + * Set if all buffer heads in the folio are mapped. + * Filesystems which do not use BHs can use it for their own purpose. + */ + PG_mappedtodisk = PG_owner_2, /* Two page bits are conscripted by FS-Cache to maintain local caching * state. These bits are set on pages belonging to the netfs's inodes @@ -183,8 +184,9 @@ enum pageflags { */ /* At least one page in this folio has the hwpoison flag set */ - PG_has_hwpoisoned = PG_error, + PG_has_hwpoisoned = PG_active, PG_large_rmappable = PG_workingset, /* anon or file-backed */ + PG_partially_mapped = PG_reclaim, /* was identified to be partially mapped */ }; #define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1) @@ -235,7 +237,7 @@ static __always_inline int page_is_fake_head(const struct page *page) return page_fixed_fake_head(page) != page; } -static inline unsigned long _compound_head(const struct page *page) +static __always_inline unsigned long _compound_head(const struct page *page) { unsigned long head = READ_ONCE(page->compound_head); @@ -506,7 +508,6 @@ static inline int TestClearPage##uname(struct page *page) { return 0; } __PAGEFLAG(Locked, locked, PF_NO_TAIL) FOLIO_FLAG(waiters, FOLIO_HEAD_PAGE) -PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL) FOLIO_FLAG(referenced, FOLIO_HEAD_PAGE) FOLIO_TEST_CLEAR_FLAG(referenced, FOLIO_HEAD_PAGE) __FOLIO_SET_FLAG(referenced, FOLIO_HEAD_PAGE) @@ -514,8 +515,9 @@ PAGEFLAG(Dirty, dirty, PF_HEAD) TESTSCFLAG(Dirty, dirty, PF_HEAD) __CLEARPAGEFLAG(Dirty, dirty, PF_HEAD) PAGEFLAG(LRU, lru, PF_HEAD) __CLEARPAGEFLAG(LRU, lru, PF_HEAD) TESTCLEARFLAG(LRU, lru, PF_HEAD) -PAGEFLAG(Active, active, PF_HEAD) __CLEARPAGEFLAG(Active, active, PF_HEAD) - TESTCLEARFLAG(Active, active, PF_HEAD) +FOLIO_FLAG(active, FOLIO_HEAD_PAGE) + __FOLIO_CLEAR_FLAG(active, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(active, FOLIO_HEAD_PAGE) PAGEFLAG(Workingset, workingset, PF_HEAD) TESTCLEARFLAG(Workingset, workingset, PF_HEAD) PAGEFLAG(Checked, checked, PF_NO_COMPOUND) /* Used by some filesystems */ @@ -531,9 +533,9 @@ PAGEFLAG(XenRemapped, xen_remapped, PF_NO_COMPOUND) PAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __CLEARPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) __SETPAGEFLAG(Reserved, reserved, PF_NO_COMPOUND) -PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) - __CLEARPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) - __SETPAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) +FOLIO_FLAG(swapbacked, FOLIO_HEAD_PAGE) + __FOLIO_CLEAR_FLAG(swapbacked, FOLIO_HEAD_PAGE) + __FOLIO_SET_FLAG(swapbacked, FOLIO_HEAD_PAGE) /* * Private page markings that may be used by the filesystem that owns 
the page @@ -542,8 +544,9 @@ PAGEFLAG(SwapBacked, swapbacked, PF_NO_TAIL) */ PAGEFLAG(Private, private, PF_ANY) PAGEFLAG(Private2, private_2, PF_ANY) TESTSCFLAG(Private2, private_2, PF_ANY) -PAGEFLAG(OwnerPriv1, owner_priv_1, PF_ANY) - TESTCLEARFLAG(OwnerPriv1, owner_priv_1, PF_ANY) + +/* owner_2 can be set on tail pages for anon memory */ +FOLIO_FLAG(owner_2, FOLIO_HEAD_PAGE) /* * Only test-and-set exist for PG_writeback. The unconditional operators are @@ -556,8 +559,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL) /* PG_readahead is only used for reads; PG_reclaim is only for writes */ PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL) TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL) -PAGEFLAG(Readahead, readahead, PF_NO_COMPOUND) - TESTCLEARFLAG(Readahead, readahead, PF_NO_COMPOUND) +FOLIO_FLAG(readahead, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(readahead, FOLIO_HEAD_PAGE) #ifdef CONFIG_HIGHMEM /* @@ -577,34 +580,26 @@ static __always_inline bool folio_test_swapcache(const struct folio *folio) test_bit(PG_swapcache, const_folio_flags(folio, 0)); } -static __always_inline bool PageSwapCache(const struct page *page) -{ - return folio_test_swapcache(page_folio(page)); -} - -SETPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) -CLEARPAGEFLAG(SwapCache, swapcache, PF_NO_TAIL) +FOLIO_SET_FLAG(swapcache, FOLIO_HEAD_PAGE) +FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE) #else -PAGEFLAG_FALSE(SwapCache, swapcache) +FOLIO_FLAG_FALSE(swapcache) #endif -PAGEFLAG(Unevictable, unevictable, PF_HEAD) - __CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD) - TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD) +FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE) + __FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE) #ifdef CONFIG_MMU -PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) - __CLEARPAGEFLAG(Mlocked, mlocked, PF_NO_TAIL) - TESTSCFLAG(Mlocked, mlocked, PF_NO_TAIL) -#else -PAGEFLAG_FALSE(Mlocked, mlocked) __CLEARPAGEFLAG_NOOP(Mlocked, mlocked) - TESTSCFLAG_FALSE(Mlocked, mlocked) -#endif - -#ifdef CONFIG_ARCH_USES_PG_UNCACHED -PAGEFLAG(Uncached, uncached, PF_NO_COMPOUND) +FOLIO_FLAG(mlocked, FOLIO_HEAD_PAGE) + __FOLIO_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE) + FOLIO_TEST_CLEAR_FLAG(mlocked, FOLIO_HEAD_PAGE) + FOLIO_TEST_SET_FLAG(mlocked, FOLIO_HEAD_PAGE) #else -PAGEFLAG_FALSE(Uncached, uncached) +FOLIO_FLAG_FALSE(mlocked) + __FOLIO_CLEAR_FLAG_NOOP(mlocked) + FOLIO_TEST_CLEAR_FLAG_FALSE(mlocked) + FOLIO_TEST_SET_FLAG_FALSE(mlocked) #endif #ifdef CONFIG_MEMORY_FAILURE @@ -865,8 +860,18 @@ static inline void ClearPageCompound(struct page *page) ClearPageHead(page); } FOLIO_FLAG(large_rmappable, FOLIO_SECOND_PAGE) +FOLIO_TEST_FLAG(partially_mapped, FOLIO_SECOND_PAGE) +/* + * PG_partially_mapped is protected by deferred_split split_queue_lock, + * so its safe to use non-atomic set/clear. + */ +__FOLIO_SET_FLAG(partially_mapped, FOLIO_SECOND_PAGE) +__FOLIO_CLEAR_FLAG(partially_mapped, FOLIO_SECOND_PAGE) #else FOLIO_FLAG_FALSE(large_rmappable) +FOLIO_TEST_FLAG_FALSE(partially_mapped) +__FOLIO_SET_FLAG_NOOP(partially_mapped) +__FOLIO_CLEAR_FLAG_NOOP(partially_mapped) #endif #define PG_head_mask ((1UL << PG_head)) @@ -927,79 +932,74 @@ PAGEFLAG_FALSE(HasHWPoisoned, has_hwpoisoned) #endif /* - * For pages that are never mapped to userspace, - * page_type may be used. Because it is initialised to -1, we invert the - * sense of the bit, so __SetPageFoo *clears* the bit used for PageFoo, and - * __ClearPageFoo *sets* the bit used for PageFoo. 
We reserve a few high and - * low bits so that an underflow or overflow of _mapcount won't be - * mistaken for a page type value. + * For pages that do not use mapcount, page_type may be used. + * The low 24 bits of pagetype may be used for your own purposes, as long + * as you are careful to not affect the top 8 bits. The low bits of + * pagetype will be overwritten when you clear the page_type from the page. */ - enum pagetype { - PG_buddy = 0x40000000, - PG_offline = 0x20000000, - PG_table = 0x10000000, - PG_guard = 0x08000000, - PG_hugetlb = 0x04000000, - PG_slab = 0x02000000, - PG_zsmalloc = 0x01000000, - - PAGE_TYPE_BASE = 0x80000000, - - /* - * Reserve 0xffff0000 - 0xfffffffe to catch _mapcount underflows and - * allow owners that set a type to reuse the lower 16 bit for their own - * purposes. - */ - PAGE_MAPCOUNT_RESERVE = ~0x0000ffff, + /* 0x00-0x7f are positive numbers, ie mapcount */ + /* Reserve 0x80-0xef for mapcount overflow. */ + PGTY_buddy = 0xf0, + PGTY_offline = 0xf1, + PGTY_table = 0xf2, + PGTY_guard = 0xf3, + PGTY_hugetlb = 0xf4, + PGTY_slab = 0xf5, + PGTY_zsmalloc = 0xf6, + PGTY_unaccepted = 0xf7, + + PGTY_mapcount_underflow = 0xff }; -#define PageType(page, flag) \ - ((READ_ONCE(page->page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) -#define folio_test_type(folio, flag) \ - ((READ_ONCE(folio->page.page_type) & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE) +static inline bool page_type_has_type(int page_type) +{ + return page_type < (PGTY_mapcount_underflow << 24); +} -static inline int page_type_has_type(unsigned int page_type) +/* This takes a mapcount which is one more than page->_mapcount */ +static inline bool page_mapcount_is_type(unsigned int mapcount) { - return (int)page_type < PAGE_MAPCOUNT_RESERVE; + return page_type_has_type(mapcount - 1); } -static inline int page_has_type(const struct page *page) +static inline bool page_has_type(const struct page *page) { - return page_type_has_type(READ_ONCE(page->page_type)); + return page_mapcount_is_type(data_race(page->page_type)); } #define FOLIO_TYPE_OPS(lname, fname) \ -static __always_inline bool folio_test_##fname(const struct folio *folio)\ +static __always_inline bool folio_test_##fname(const struct folio *folio) \ { \ - return folio_test_type(folio, PG_##lname); \ + return data_race(folio->page.page_type >> 24) == PGTY_##lname; \ } \ static __always_inline void __folio_set_##fname(struct folio *folio) \ { \ - VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \ - folio->page.page_type &= ~PG_##lname; \ + VM_BUG_ON_FOLIO(data_race(folio->page.page_type) != UINT_MAX, \ + folio); \ + folio->page.page_type = (unsigned int)PGTY_##lname << 24; \ } \ static __always_inline void __folio_clear_##fname(struct folio *folio) \ { \ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \ - folio->page.page_type |= PG_##lname; \ + folio->page.page_type = UINT_MAX; \ } #define PAGE_TYPE_OPS(uname, lname, fname) \ FOLIO_TYPE_OPS(lname, fname) \ static __always_inline int Page##uname(const struct page *page) \ { \ - return PageType(page, PG_##lname); \ + return data_race(page->page_type >> 24) == PGTY_##lname; \ } \ static __always_inline void __SetPage##uname(struct page *page) \ { \ - VM_BUG_ON_PAGE(!PageType(page, 0), page); \ - page->page_type &= ~PG_##lname; \ + VM_BUG_ON_PAGE(data_race(page->page_type) != UINT_MAX, page); \ + page->page_type = (unsigned int)PGTY_##lname << 24; \ } \ static __always_inline void __ClearPage##uname(struct page *page) \ { \ VM_BUG_ON_PAGE(!Page##uname(page), page); \ - page->page_type 
|= PG_##lname; \ + page->page_type = UINT_MAX; \ } /* @@ -1076,6 +1076,13 @@ FOLIO_TEST_FLAG_FALSE(hugetlb) PAGE_TYPE_OPS(Zsmalloc, zsmalloc, zsmalloc) +/* + * Mark pages that has to be accepted before touched for the first time. + * + * Serialized with zone lock. + */ +PAGE_TYPE_OPS(Unaccepted, unaccepted, unaccepted) + /** * PageHuge - Determine if the page belongs to hugetlbfs * @page: The page to test. @@ -1175,25 +1182,20 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page) */ #define PAGE_FLAGS_SECOND \ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \ - 1UL << PG_large_rmappable) + 1UL << PG_large_rmappable | 1UL << PG_partially_mapped) #define PAGE_FLAGS_PRIVATE \ (1UL << PG_private | 1UL << PG_private_2) /** - * page_has_private - Determine if page has private stuff - * @page: The page to be checked + * folio_has_private - Determine if folio has private stuff + * @folio: The folio to be checked * - * Determine if a page has private stuff, indicating that release routines + * Determine if a folio has private stuff, indicating that release routines * should be invoked upon it. */ -static inline int page_has_private(const struct page *page) -{ - return !!(page->flags & PAGE_FLAGS_PRIVATE); -} - -static inline bool folio_has_private(const struct folio *folio) +static inline int folio_has_private(const struct folio *folio) { - return page_has_private(&folio->page); + return !!(folio->flags & PAGE_FLAGS_PRIVATE); } #undef PF_ANY diff --git a/include/linux/page_counter.h b/include/linux/page_counter.h index 904c52f97284..79dbd8bc35a7 100644 --- a/include/linux/page_counter.h +++ b/include/linux/page_counter.h @@ -26,11 +26,14 @@ struct page_counter { atomic_long_t children_low_usage; unsigned long watermark; + /* Latest cg2 reset watermark */ + unsigned long local_watermark; unsigned long failcnt; /* Keep all the read most fields in a separete cacheline. */ CACHELINE_PADDING(_pad2_); + bool protection_support; unsigned long min; unsigned long low; unsigned long high; @@ -44,12 +47,17 @@ struct page_counter { #define PAGE_COUNTER_MAX (LONG_MAX / PAGE_SIZE) #endif +/* + * Protection is supported only for the first counter (with id 0). 
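The page_counter hunks in this area add a local_watermark (reset independently of the all-time watermark) and make min/low protection opt-in through a new third argument to page_counter_init(). A minimal sketch of a parent/child pair using the updated signature; the foo_* counter names are hypothetical, and the charge/uncharge helpers are the long-standing ones from mm/page_counter.c:

#include <linux/page_counter.h>

static struct page_counter foo_parent, foo_child;

static int foo_counters_demo(void)
{
	struct page_counter *fail;

	/* Only the first counter of a set (id 0) supports min/low protection. */
	page_counter_init(&foo_parent, NULL, true);
	page_counter_init(&foo_child, &foo_parent, true);

	if (!page_counter_try_charge(&foo_child, 32, &fail))
		return -ENOMEM;		/* 'fail' is the counter that hit its max */

	/* watermark and local_watermark now both reflect the 32-page peak. */
	page_counter_reset_watermark(&foo_child);	/* local_watermark restarts here */

	page_counter_uncharge(&foo_child, 32);
	return 0;
}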
+ */ static inline void page_counter_init(struct page_counter *counter, - struct page_counter *parent) + struct page_counter *parent, + bool protection_support) { - atomic_long_set(&counter->usage, 0); + counter->usage = (atomic_long_t)ATOMIC_LONG_INIT(0); counter->max = PAGE_COUNTER_MAX; counter->parent = parent; + counter->protection_support = protection_support; } static inline unsigned long page_counter_read(struct page_counter *counter) @@ -78,11 +86,24 @@ int page_counter_memparse(const char *buf, const char *max, static inline void page_counter_reset_watermark(struct page_counter *counter) { - counter->watermark = page_counter_read(counter); + unsigned long usage = page_counter_read(counter); + + /* + * Update local_watermark first, so it's always <= watermark + * (modulo CPU/compiler re-ordering) + */ + counter->local_watermark = usage; + counter->watermark = usage; } +#ifdef CONFIG_MEMCG void page_counter_calculate_protection(struct page_counter *root, struct page_counter *counter, bool recursive_protection); +#else +static inline void page_counter_calculate_protection(struct page_counter *root, + struct page_counter *counter, + bool recursive_protection) {} +#endif #endif /* _LINUX_PAGE_COUNTER_H */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index d9c7edb6422b..68a5f1ff3301 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -32,6 +32,8 @@ int invalidate_inode_pages2_range(struct address_space *mapping, pgoff_t start, pgoff_t end); int kiocb_invalidate_pages(struct kiocb *iocb, size_t count); void kiocb_invalidate_post_direct_write(struct kiocb *iocb, size_t count); +int filemap_invalidate_pages(struct address_space *mapping, + loff_t pos, loff_t end, bool nowait); int write_inode_now(struct inode *, int sync); int filemap_fdatawrite(struct address_space *); @@ -204,14 +206,21 @@ enum mapping_flags { AS_EXITING = 4, /* final truncate in progress */ /* writeback related tags are not used */ AS_NO_WRITEBACK_TAGS = 5, - AS_LARGE_FOLIO_SUPPORT = 6, - AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */ - AS_STABLE_WRITES, /* must wait for writeback before modifying + AS_RELEASE_ALWAYS = 6, /* Call ->release_folio(), even if no private data */ + AS_STABLE_WRITES = 7, /* must wait for writeback before modifying folio contents */ - AS_INACCESSIBLE, /* Do not attempt direct R/W access to the mapping, - including to move the mapping */ + AS_INACCESSIBLE = 8, /* Do not attempt direct R/W access to the mapping */ + /* Bits 16-25 are used for FOLIO_ORDER */ + AS_FOLIO_ORDER_BITS = 5, + AS_FOLIO_ORDER_MIN = 16, + AS_FOLIO_ORDER_MAX = AS_FOLIO_ORDER_MIN + AS_FOLIO_ORDER_BITS, }; +#define AS_FOLIO_ORDER_BITS_MASK ((1u << AS_FOLIO_ORDER_BITS) - 1) +#define AS_FOLIO_ORDER_MIN_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MIN) +#define AS_FOLIO_ORDER_MAX_MASK (AS_FOLIO_ORDER_BITS_MASK << AS_FOLIO_ORDER_MAX) +#define AS_FOLIO_ORDER_MASK (AS_FOLIO_ORDER_MIN_MASK | AS_FOLIO_ORDER_MAX_MASK) + /** * mapping_set_error - record a writeback error in the address_space * @mapping: the mapping in which an error should be set @@ -367,9 +376,64 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) #define MAX_XAS_ORDER (XA_CHUNK_SHIFT * 2 - 1) #define MAX_PAGECACHE_ORDER min(MAX_XAS_ORDER, PREFERRED_MAX_PAGECACHE_ORDER) +/* + * mapping_max_folio_size_supported() - Check the max folio size supported + * + * The filesystem should call this function at mount time if there is a + * requirement on the folio mapping size in the 
page cache. + */ +static inline size_t mapping_max_folio_size_supported(void) +{ + if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return 1U << (PAGE_SHIFT + MAX_PAGECACHE_ORDER); + return PAGE_SIZE; +} + +/* + * mapping_set_folio_order_range() - Set the orders supported by a file. + * @mapping: The address space of the file. + * @min: Minimum folio order (between 0-MAX_PAGECACHE_ORDER inclusive). + * @max: Maximum folio order (between @min-MAX_PAGECACHE_ORDER inclusive). + * + * The filesystem should call this function in its inode constructor to + * indicate which base size (min) and maximum size (max) of folio the VFS + * can use to cache the contents of the file. This should only be used + * if the filesystem needs special handling of folio sizes (ie there is + * something the core cannot know). + * Do not tune it based on, eg, i_size. + * + * Context: This should not be called while the inode is active as it + * is non-atomic. + */ +static inline void mapping_set_folio_order_range(struct address_space *mapping, + unsigned int min, + unsigned int max) +{ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return; + + if (min > MAX_PAGECACHE_ORDER) + min = MAX_PAGECACHE_ORDER; + + if (max > MAX_PAGECACHE_ORDER) + max = MAX_PAGECACHE_ORDER; + + if (max < min) + max = min; + + mapping->flags = (mapping->flags & ~AS_FOLIO_ORDER_MASK) | + (min << AS_FOLIO_ORDER_MIN) | (max << AS_FOLIO_ORDER_MAX); +} + +static inline void mapping_set_folio_min_order(struct address_space *mapping, + unsigned int min) +{ + mapping_set_folio_order_range(mapping, min, MAX_PAGECACHE_ORDER); +} + /** * mapping_set_large_folios() - Indicate the file supports large folios. - * @mapping: The file. + * @mapping: The address space of the file. * * The filesystem should call this function in its inode constructor to * indicate that the VFS can use large folios to cache the contents of @@ -380,7 +444,44 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask) */ static inline void mapping_set_large_folios(struct address_space *mapping) { - __set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); + mapping_set_folio_order_range(mapping, 0, MAX_PAGECACHE_ORDER); +} + +static inline unsigned int +mapping_max_folio_order(const struct address_space *mapping) +{ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return 0; + return (mapping->flags & AS_FOLIO_ORDER_MAX_MASK) >> AS_FOLIO_ORDER_MAX; +} + +static inline unsigned int +mapping_min_folio_order(const struct address_space *mapping) +{ + if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) + return 0; + return (mapping->flags & AS_FOLIO_ORDER_MIN_MASK) >> AS_FOLIO_ORDER_MIN; +} + +static inline unsigned long +mapping_min_folio_nrpages(struct address_space *mapping) +{ + return 1UL << mapping_min_folio_order(mapping); +} + +/** + * mapping_align_index() - Align index for this mapping. + * @mapping: The address_space. + * @index: The page index. + * + * The index of a folio must be naturally aligned. If you are adding a + * new folio to the page cache and need to know what index to give it, + * call this function. 
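The helpers above, together with mapping_align_index() just below, are what a filesystem whose block size can exceed PAGE_SIZE uses to pin the page cache to large folios. A hedged sketch; the foo_* names and the blkbits parameter are illustrative, not part of the patch:

#include <linux/fs.h>
#include <linux/pagemap.h>

/* At mount time: refuse block sizes the page cache cannot represent. */
static int foo_check_blocksize(unsigned int blkbits)
{
	if ((1UL << blkbits) > mapping_max_folio_size_supported())
		return -EINVAL;
	return 0;
}

/* In the inode constructor: one folio must cover at least one fs block. */
static void foo_set_inode_order(struct inode *inode, unsigned int blkbits)
{
	unsigned int min_order = blkbits > PAGE_SHIFT ? blkbits - PAGE_SHIFT : 0;

	mapping_set_folio_min_order(inode->i_mapping, min_order);
}

/* New folios added to the cache must be naturally aligned to that order. */
static pgoff_t foo_new_folio_index(struct address_space *mapping, pgoff_t index)
{
	return mapping_align_index(mapping, index);
}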
+ */ +static inline pgoff_t mapping_align_index(struct address_space *mapping, + pgoff_t index) +{ + return round_down(index, mapping_min_folio_nrpages(mapping)); } /* @@ -389,20 +490,17 @@ static inline void mapping_set_large_folios(struct address_space *mapping) */ static inline bool mapping_large_folio_support(struct address_space *mapping) { - /* AS_LARGE_FOLIO_SUPPORT is only reasonable for pagecache folios */ + /* AS_FOLIO_ORDER is only reasonable for pagecache folios */ VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON, "Anonymous mapping always supports large folio"); - return IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && - test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags); + return mapping_max_folio_order(mapping) > 0; } /* Return the maximum folio size for this pagecache mapping, in bytes. */ -static inline size_t mapping_max_folio_size(struct address_space *mapping) +static inline size_t mapping_max_folio_size(const struct address_space *mapping) { - if (mapping_large_folio_support(mapping)) - return PAGE_SIZE << MAX_PAGECACHE_ORDER; - return PAGE_SIZE; + return PAGE_SIZE << mapping_max_folio_order(mapping); } static inline int filemap_nr_thps(struct address_space *mapping) diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index 27cd1e59ccf7..f5eb5a32aeed 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -130,4 +130,62 @@ int walk_page_mapping(struct address_space *mapping, pgoff_t first_index, pgoff_t nr, const struct mm_walk_ops *ops, void *private); +typedef int __bitwise folio_walk_flags_t; + +/* + * Walk migration entries as well. Careful: a large folio might get split + * concurrently. + */ +#define FW_MIGRATION ((__force folio_walk_flags_t)BIT(0)) + +/* Walk shared zeropages (small + huge) as well. */ +#define FW_ZEROPAGE ((__force folio_walk_flags_t)BIT(1)) + +enum folio_walk_level { + FW_LEVEL_PTE, + FW_LEVEL_PMD, + FW_LEVEL_PUD, +}; + +/** + * struct folio_walk - folio_walk_start() / folio_walk_end() data + * @page: exact folio page referenced (if applicable) + * @level: page table level identifying the entry type + * @pte: pointer to the page table entry (FW_LEVEL_PTE). + * @pmd: pointer to the page table entry (FW_LEVEL_PMD). + * @pud: pointer to the page table entry (FW_LEVEL_PUD). + * @ptl: pointer to the page table lock. + * + * (see folio_walk_start() documentation for more details) + */ +struct folio_walk { + /* public */ + struct page *page; + enum folio_walk_level level; + union { + pte_t *ptep; + pud_t *pudp; + pmd_t *pmdp; + }; + union { + pte_t pte; + pud_t pud; + pmd_t pmd; + }; + /* private */ + struct vm_area_struct *vma; + spinlock_t *ptl; +}; + +struct folio *folio_walk_start(struct folio_walk *fw, + struct vm_area_struct *vma, unsigned long addr, + folio_walk_flags_t flags); + +#define folio_walk_end(__fw, __vma) do { \ + spin_unlock((__fw)->ptl); \ + if (likely((__fw)->level == FW_LEVEL_PTE)) \ + pte_unmap((__fw)->ptep); \ + vma_pgtable_walk_end(__vma); \ +} while (0) + #endif /* _LINUX_PAGEWALK_H */ diff --git a/include/linux/path.h b/include/linux/path.h index ca073e70decd..7ea389dc764b 100644 --- a/include/linux/path.h +++ b/include/linux/path.h @@ -18,12 +18,6 @@ static inline int path_equal(const struct path *path1, const struct path *path2) return path1->mnt == path2->mnt && path1->dentry == path2->dentry; } -static inline void path_put_init(struct path *path) -{ - path_put(path); - *path = (struct path) { }; -} - /* * Cleanup macro for use with __free(path_put). 
Avoids dereference and * copying @path unlike DEFINE_FREE(). path_put() will handle the empty diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index df54cd5b15db..0e8b74e63767 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h @@ -8,6 +8,7 @@ /* Address Translation Service */ bool pci_ats_supported(struct pci_dev *dev); int pci_enable_ats(struct pci_dev *dev, int ps); +int pci_prepare_ats(struct pci_dev *dev, int ps); void pci_disable_ats(struct pci_dev *dev); int pci_ats_queue_depth(struct pci_dev *dev); int pci_ats_page_aligned(struct pci_dev *dev); @@ -16,6 +17,8 @@ static inline bool pci_ats_supported(struct pci_dev *d) { return false; } static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } +static inline int pci_prepare_ats(struct pci_dev *dev, int ps) +{ return -ENODEV; } static inline void pci_disable_ats(struct pci_dev *d) { } static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 85bdf2adb760..42ef06136bd1 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h @@ -128,6 +128,7 @@ struct pci_epc_mem { * @group: configfs group representing the PCI EPC device * @lock: mutex to protect pci_epc ops * @function_num_map: bitmap to manage physical function number + * @domain_nr: PCI domain number of the endpoint controller * @init_complete: flag to indicate whether the EPC initialization is complete * or not */ @@ -145,10 +146,12 @@ struct pci_epc { /* mutex to protect against concurrent access of EP controller */ struct mutex lock; unsigned long function_num_map; + int domain_nr; bool init_complete; }; /** + * enum pci_epc_bar_type - configurability of endpoint BAR * @BAR_PROGRAMMABLE: The BAR mask can be configured by the EPC. * @BAR_FIXED: The BAR mask is fixed by the hardware. * @BAR_RESERVED: The BAR should not be touched by an EPF driver. diff --git a/include/linux/pci.h b/include/linux/pci.h index 4cf89a4b4cbc..573b4c4c2be6 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -371,6 +371,7 @@ struct pci_dev { can be generated */ unsigned int pme_poll:1; /* Poll device's PME status bit */ unsigned int pinned:1; /* Whether this dev is pinned */ + unsigned int config_rrs_sv:1; /* Config RRS software visibility */ unsigned int imm_ready:1; /* Supports Immediate Readiness */ unsigned int d1_support:1; /* Low power state D1 is supported */ unsigned int d2_support:1; /* Low power state D2 is supported */ @@ -517,6 +518,9 @@ struct pci_dev { #ifdef CONFIG_PCI_DOE struct xarray doe_mbs; /* Data Object Exchange mailboxes */ #endif +#ifdef CONFIG_PCI_NPEM + struct npem *npem; /* Native PCIe Enclosure Management */ +#endif u16 acs_cap; /* ACS Capability offset */ phys_addr_t rom; /* Physical address if not from BAR */ size_t romlen; /* Length if not from BAR */ @@ -1098,7 +1102,7 @@ enum pcie_bus_config_types { extern enum pcie_bus_config_types pcie_bus_config; -extern struct bus_type pci_bus_type; +extern const struct bus_type pci_bus_type; /* Do NOT directly access these two variables, unless you are arch-specific PCI * code, or PCI core code. 
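A little further down, this pci.h hunk also adds managed region helpers, pcim_iomap_region() and pcim_request_region(). A probe-time sketch under assumed names (foo_pci_probe(), BAR 0 and the "foo_pci" region name are illustrative only):

#include <linux/err.h>
#include <linux/pci.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/* Request and map BAR 0 in one managed step; unwound automatically. */
	regs = pcim_iomap_region(pdev, 0, "foo_pci");
	if (IS_ERR(regs))
		return PTR_ERR(regs);

	/* ... program the device through 'regs' ... */
	return 0;
}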
*/ @@ -1884,7 +1888,7 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) { return 0; } #endif int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); -void pci_bus_release_domain_nr(struct pci_bus *bus, struct device *parent); +void pci_bus_release_domain_nr(struct device *parent, int domain_nr); #endif /* Some architectures require additional setup to direct VGA traffic */ @@ -2290,8 +2294,11 @@ static inline void pci_fixup_device(enum pci_fixup_pass pass, #endif void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen); +void __iomem *pcim_iomap_region(struct pci_dev *pdev, int bar, + const char *name); void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr); void __iomem * const *pcim_iomap_table(struct pci_dev *pdev); +int pcim_request_region(struct pci_dev *pdev, int bar, const char *name); int pcim_iomap_regions(struct pci_dev *pdev, int mask, const char *name); int pcim_iomap_regions_request_all(struct pci_dev *pdev, int mask, const char *name); diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index e388c8b1cbc2..4cf6aaed5f35 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h @@ -580,6 +580,7 @@ #define PCI_DEVICE_ID_AMD_19H_M78H_DF_F3 0x12fb #define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3 #define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb +#define PCI_DEVICE_ID_AMD_1AH_M60H_DF_F3 0x124b #define PCI_DEVICE_ID_AMD_1AH_M70H_DF_F3 0x12bb #define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3 #define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b @@ -2661,6 +2662,8 @@ #define PCI_DEVICE_ID_DCI_PCCOM8 0x0002 #define PCI_DEVICE_ID_DCI_PCCOM2 0x0004 +#define PCI_VENDOR_ID_GLENFLY 0x6766 + #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_EESSC 0x0008 #define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8 @@ -2706,6 +2709,9 @@ #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 #define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a +#define PCI_DEVICE_ID_INTEL_DSA_GNRD 0x11fb +#define PCI_DEVICE_ID_INTEL_DSA_DMR 0x1212 +#define PCI_DEVICE_ID_INTEL_IAA_DMR 0x1216 #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 #define PCI_DEVICE_ID_INTEL_82437 0x122d #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 36b942b67b7d..c012df33a9f0 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h @@ -145,7 +145,7 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *); #define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem) static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem, - bool read, unsigned long ip) + unsigned long ip) { lock_release(&sem->dep_map, ip); } diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 4b2047b78b67..b6321fc49159 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -135,7 +135,6 @@ extern void __init setup_per_cpu_areas(void); extern void __percpu *pcpu_alloc_noprof(size_t size, size_t align, bool reserved, gfp_t gfp) __alloc_size(1); -extern size_t pcpu_alloc_size(void __percpu *__pdata); #define __alloc_percpu_gfp(_size, _align, _gfp) \ alloc_hooks(pcpu_alloc_noprof(_size, _align, false, _gfp)) diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h index b3b34f6670cf..4b5b83677e3f 100644 --- a/include/linux/perf/arm_pmu.h +++ b/include/linux/perf/arm_pmu.h @@ -17,10 +17,14 @@ #ifdef CONFIG_ARM_PMU /* - * The ARMv7 CPU PMU supports up to 32 event counters. 
+ * The Armv7 and Armv8.8 or less CPU PMU supports up to 32 event counters. + * The Armv8.9/9.4 CPU PMU supports up to 33 event counters. */ +#ifdef CONFIG_ARM #define ARMPMU_MAX_HWEVENTS 32 - +#else +#define ARMPMU_MAX_HWEVENTS 33 +#endif /* * ARM PMU hw_event flags */ @@ -96,7 +100,7 @@ struct arm_pmu { void (*stop)(struct arm_pmu *); void (*reset)(void *); int (*map_event)(struct perf_event *event); - int num_events; + DECLARE_BITMAP(cntr_mask, ARMPMU_MAX_HWEVENTS); bool secure_access; /* 32-bit ARM only */ #define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h index 7867db04ec98..3372c1b56486 100644 --- a/include/linux/perf/arm_pmuv3.h +++ b/include/linux/perf/arm_pmuv3.h @@ -6,8 +6,9 @@ #ifndef __PERF_ARM_PMUV3_H #define __PERF_ARM_PMUV3_H -#define ARMV8_PMU_MAX_COUNTERS 32 -#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1) +#define ARMV8_PMU_MAX_GENERAL_COUNTERS 31 +#define ARMV8_PMU_CYCLE_IDX 31 +#define ARMV8_PMU_INSTR_IDX 32 /* Not accessible from AArch32 */ /* * Common architectural and microarchitectural event numbers. @@ -227,8 +228,10 @@ */ #define ARMV8_PMU_OVSR_P GENMASK(30, 0) #define ARMV8_PMU_OVSR_C BIT(31) +#define ARMV8_PMU_OVSR_F BIT_ULL(32) /* arm64 only */ /* Mask for writable bits is both P and C fields */ -#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C) +#define ARMV8_PMU_OVERFLOWED_MASK (ARMV8_PMU_OVSR_P | ARMV8_PMU_OVSR_C | \ + ARMV8_PMU_OVSR_F) /* * PMXEVTYPER: Event selection reg diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1a8942277dda..fb908843f209 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -168,6 +168,9 @@ struct hw_perf_event { struct hw_perf_event_extra extra_reg; struct hw_perf_event_extra branch_reg; }; + struct { /* aux / Intel-PT */ + u64 aux_config; + }; struct { /* software */ struct hrtimer hrtimer; }; @@ -292,6 +295,19 @@ struct perf_event_pmu_context; #define PERF_PMU_CAP_AUX_OUTPUT 0x0080 #define PERF_PMU_CAP_EXTENDED_HW_TYPE 0x0100 +/** + * pmu::scope + */ +enum perf_pmu_scope { + PERF_PMU_SCOPE_NONE = 0, + PERF_PMU_SCOPE_CORE, + PERF_PMU_SCOPE_DIE, + PERF_PMU_SCOPE_CLUSTER, + PERF_PMU_SCOPE_PKG, + PERF_PMU_SCOPE_SYS_WIDE, + PERF_PMU_MAX_SCOPE, +}; + struct perf_output_handle; #define PMU_NULL_DEV ((void *)(~0UL)) @@ -315,6 +331,11 @@ struct pmu { */ int capabilities; + /* + * PMU scope + */ + unsigned int scope; + int __percpu *pmu_disable_count; struct perf_cpu_pmu_context __percpu *cpu_pmu_context; atomic_t exclusive_cnt; /* < 0: cpu; > 0: tsk */ @@ -615,10 +636,13 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *, * PERF_EV_CAP_SIBLING: An event with this flag must be a group sibling and * cannot be a group leader. If an event with this flag is detached from the * group it is scheduled out and moved into an unrecoverable ERROR state. + * PERF_EV_CAP_READ_SCOPE: A CPU event that can be read from any CPU of the + * PMU scope where it is active. */ #define PERF_EV_CAP_SOFTWARE BIT(0) #define PERF_EV_CAP_READ_ACTIVE_PKG BIT(1) #define PERF_EV_CAP_SIBLING BIT(2) +#define PERF_EV_CAP_READ_SCOPE BIT(3) #define SWEVENT_HLIST_BITS 8 #define SWEVENT_HLIST_SIZE (1 << SWEVENT_HLIST_BITS) @@ -963,12 +987,16 @@ struct perf_event_context { struct rcu_head rcu_head; /* - * Sum (event->pending_work + event->pending_work) + * The count of events for which using the switch-out fast path + * should be avoided. 
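The perf changes in this area introduce pmu::scope and the PERF_EV_CAP_READ_SCOPE capability, letting an uncore-style PMU tell the core which topology level it belongs to so its events can be placed on, and read from, any CPU in that scope. A hedged sketch of a driver opting in; the foo_* names and the omitted callbacks are placeholders:

#include <linux/module.h>
#include <linux/perf_event.h>

static int foo_event_init(struct perf_event *event)
{
	/* Readable from any online CPU in the package the PMU belongs to. */
	event->event_caps |= PERF_EV_CAP_READ_SCOPE;
	return 0;
}

static struct pmu foo_pmu = {
	.module		= THIS_MODULE,
	.task_ctx_nr	= perf_invalid_context,
	.scope		= PERF_PMU_SCOPE_PKG,	/* one instance per physical package */
	.event_init	= foo_event_init,
	/* .add, .del, .start, .stop, .read as for any uncore PMU */
};

/* registered with: perf_pmu_register(&foo_pmu, "foo_pmu", -1); */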
+ * + * Sum (event->pending_work + events with + * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ))) * * The SIGTRAP is targeted at ctx->task, as such it won't do changing * that until the signal is delivered. */ - local_t nr_pending; + local_t nr_no_switch_fast; }; struct perf_cpu_pmu_context { @@ -1602,13 +1630,7 @@ static inline int perf_is_paranoid(void) return sysctl_perf_event_paranoid > -1; } -static inline int perf_allow_kernel(struct perf_event_attr *attr) -{ - if (sysctl_perf_event_paranoid > 1 && !perfmon_capable()) - return -EACCES; - - return security_perf_event_open(attr, PERF_SECURITY_KERNEL); -} +int perf_allow_kernel(struct perf_event_attr *attr); static inline int perf_allow_cpu(struct perf_event_attr *attr) { diff --git a/include/linux/pgalloc_tag.h b/include/linux/pgalloc_tag.h index 207f0c83c8e9..59a3deb792a8 100644 --- a/include/linux/pgalloc_tag.h +++ b/include/linux/pgalloc_tag.h @@ -80,36 +80,6 @@ static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) } } -static inline void pgalloc_tag_split(struct page *page, unsigned int nr) -{ - int i; - struct page_ext *first_page_ext; - struct page_ext *page_ext; - union codetag_ref *ref; - struct alloc_tag *tag; - - if (!mem_alloc_profiling_enabled()) - return; - - first_page_ext = page_ext = page_ext_get(page); - if (unlikely(!page_ext)) - return; - - ref = codetag_ref_from_page_ext(page_ext); - if (!ref->ct) - goto out; - - tag = ct_to_alloc_tag(ref->ct); - page_ext = page_ext_next(page_ext); - for (i = 1; i < nr; i++) { - /* Set new reference to point to the original tag */ - alloc_tag_ref_set(codetag_ref_from_page_ext(page_ext), tag); - page_ext = page_ext_next(page_ext); - } -out: - page_ext_put(first_page_ext); -} - static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { struct alloc_tag *tag = NULL; @@ -142,7 +112,6 @@ static inline void clear_page_tag_ref(struct page *page) {} static inline void pgalloc_tag_add(struct page *page, struct task_struct *task, unsigned int nr) {} static inline void pgalloc_tag_sub(struct page *page, unsigned int nr) {} -static inline void pgalloc_tag_split(struct page *page, unsigned int nr) {} static inline struct alloc_tag *pgalloc_tag_get(struct page *page) { return NULL; } static inline void pgalloc_tag_sub_pages(struct alloc_tag *tag, unsigned int nr) {} diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 2a6a3cccfc36..e8b2ac6bd2ae 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -447,6 +447,12 @@ static inline void arch_check_zapped_pmd(struct vm_area_struct *vma, } #endif +#ifndef arch_check_zapped_pud +static inline void arch_check_zapped_pud(struct vm_area_struct *vma, pud_t pud) +{ +} +#endif + #ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long address, @@ -1950,6 +1956,18 @@ typedef unsigned int pgtbl_mod_mask; #define MAX_PTRS_PER_P4D PTRS_PER_P4D #endif +#ifndef pte_pgprot +#define pte_pgprot(x) ((pgprot_t) {0}) +#endif + +#ifndef pmd_pgprot +#define pmd_pgprot(x) ((pgprot_t) {0}) +#endif + +#ifndef pud_pgprot +#define pud_pgprot(x) ((pgprot_t) {0}) +#endif + /* description of effects of mapping type and prot in current implementation. * this is due to the limited x86 page protection hardware. 
The expected * behavior is in parens: diff --git a/include/linux/phy.h b/include/linux/phy.h index 6b7d40d49129..a98bc91a0cde 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h @@ -554,6 +554,9 @@ struct macsec_ops; * @drv: Pointer to the driver for this PHY instance * @devlink: Create a link between phy dev and mac dev, if the external phy * used by current mac interface is managed by another mac interface. + * @phyindex: Unique id across the phy's parent tree of phys to address the PHY + * from userspace, similar to ifindex. A zero index means the PHY + * wasn't assigned an id yet. * @phy_id: UID for this device found during discovery * @c45_ids: 802.3-c45 Device Identifiers if is_c45. * @is_c45: Set to true if this PHY uses clause 45 addressing. @@ -656,6 +659,7 @@ struct phy_device { struct device_link *devlink; + u32 phyindex; u32 phy_id; struct phy_c45_device_ids c45_ids; @@ -1777,6 +1781,8 @@ int phy_suspend(struct phy_device *phydev); int phy_resume(struct phy_device *phydev); int __phy_resume(struct phy_device *phydev); int phy_loopback(struct phy_device *phydev, bool enable); +int phy_sfp_connect_phy(void *upstream, struct phy_device *phy); +void phy_sfp_disconnect_phy(void *upstream, struct phy_device *phy); void phy_sfp_attach(void *upstream, struct sfp_bus *bus); void phy_sfp_detach(void *upstream, struct sfp_bus *bus); int phy_sfp_probe(struct phy_device *phydev, diff --git a/include/linux/phy_link_topology.h b/include/linux/phy_link_topology.h new file mode 100644 index 000000000000..68a59e25821c --- /dev/null +++ b/include/linux/phy_link_topology.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PHY device list allow maintaining a list of PHY devices that are + * part of a netdevice's link topology. PHYs can for example be chained, + * as is the case when using a PHY that exposes an SFP module, on which an + * SFP transceiver that embeds a PHY is connected. + * + * This list can then be used by userspace to leverage individual PHY + * capabilities. 
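phy_link_topology.h (added below) gives every PHY attached to a netdevice a stable phyindex. A small lookup sketch using the phy_link_topo_get_phy() helper defined in the new header; foo_get_phy_name() and its error handling are illustrative:

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/phy_link_topology.h>
#include <linux/string.h>

static int foo_get_phy_name(struct net_device *dev, u32 phyindex,
			    char *buf, size_t len)
{
	struct phy_device *phydev;

	/* NULL if the netdev has no topology yet or the index is unknown. */
	phydev = phy_link_topo_get_phy(dev, phyindex);
	if (!phydev)
		return -ENODEV;

	strscpy(buf, phydev_name(phydev), len);
	return 0;
}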
+ */ +#ifndef __PHY_LINK_TOPOLOGY_H +#define __PHY_LINK_TOPOLOGY_H + +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +struct xarray; +struct phy_device; +struct sfp_bus; + +struct phy_link_topology { + struct xarray phys; + u32 next_phy_index; +}; + +struct phy_device_node { + enum phy_upstream upstream_type; + + union { + struct net_device *netdev; + struct phy_device *phydev; + } upstream; + + struct sfp_bus *parent_sfp_bus; + + struct phy_device *phy; +}; + +#if IS_ENABLED(CONFIG_PHYLIB) +int phy_link_topo_add_phy(struct net_device *dev, + struct phy_device *phy, + enum phy_upstream upt, void *upstream); + +void phy_link_topo_del_phy(struct net_device *dev, struct phy_device *phy); + +static inline struct phy_device * +phy_link_topo_get_phy(struct net_device *dev, u32 phyindex) +{ + struct phy_link_topology *topo = dev->link_topo; + struct phy_device_node *pdn; + + if (!topo) + return NULL; + + pdn = xa_load(&topo->phys, phyindex); + if (pdn) + return pdn->phy; + + return NULL; +} + +#else +static inline int phy_link_topo_add_phy(struct net_device *dev, + struct phy_device *phy, + enum phy_upstream upt, void *upstream) +{ + return 0; +} + +static inline void phy_link_topo_del_phy(struct net_device *dev, + struct phy_device *phy) +{ +} + +static inline struct phy_device * +phy_link_topo_get_phy(struct net_device *dev, u32 phyindex) +{ + return NULL; +} +#endif + +#endif /* __PHY_LINK_TOPOLOGY_H */ diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 2381e07429a2..5c01048860c4 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h @@ -598,6 +598,8 @@ int phylink_fwnode_phy_connect(struct phylink *pl, const struct fwnode_handle *fwnode, u32 flags); void phylink_disconnect_phy(struct phylink *); +int phylink_set_fixed_link(struct phylink *, + const struct phylink_link_state *); void phylink_mac_change(struct phylink *, bool up); void phylink_pcs_change(struct phylink_pcs *, bool up); diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index a65d3d078e58..53cfde98433d 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h @@ -81,6 +81,8 @@ struct pinctrl_map; * @PIN_CONFIG_INPUT_SCHMITT_ENABLE: control schmitt-trigger mode on the pin. * If the argument != 0, schmitt-trigger mode is enabled. If it's 0, * schmitt-trigger mode is disabled. + * @PIN_CONFIG_INPUT_SCHMITT_UV: this will configure an input pin to run in + * schmitt-trigger mode. The argument is in uV. * @PIN_CONFIG_MODE_LOW_POWER: this will configure the pin for low power * operation, if several modes of operation are supported these can be * passed in the argument on a custom form, else just use argument 1 @@ -132,6 +134,7 @@ enum pin_config_param { PIN_CONFIG_INPUT_ENABLE, PIN_CONFIG_INPUT_SCHMITT, PIN_CONFIG_INPUT_SCHMITT_ENABLE, + PIN_CONFIG_INPUT_SCHMITT_UV, PIN_CONFIG_MODE_LOW_POWER, PIN_CONFIG_MODE_PWM, PIN_CONFIG_OUTPUT, diff --git a/include/linux/platform_data/amd_qdma.h b/include/linux/platform_data/amd_qdma.h new file mode 100644 index 000000000000..576d952f97ed --- /dev/null +++ b/include/linux/platform_data/amd_qdma.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2023-2024, Advanced Micro Devices, Inc. + */ + +#ifndef _PLATDATA_AMD_QDMA_H +#define _PLATDATA_AMD_QDMA_H + +#include <linux/dmaengine.h> + +/** + * struct qdma_queue_info - DMA queue information. 
This information is used to + * match queue when DMA channel is requested + * @dir: Channel transfer direction + */ +struct qdma_queue_info { + enum dma_transfer_direction dir; +}; + +#define QDMA_FILTER_PARAM(qinfo) ((void *)(qinfo)) + +struct dma_slave_map; + +/** + * struct qdma_platdata - Platform specific data for QDMA engine + * @max_mm_channels: Maximum number of MM DMA channels in each direction + * @device_map: DMA slave map + * @irq_index: The index of first IRQ + */ +struct qdma_platdata { + u32 max_mm_channels; + u32 irq_index; + struct dma_slave_map *device_map; +}; + +#endif /* _PLATDATA_AMD_QDMA_H */ diff --git a/include/linux/platform_data/cyttsp4.h b/include/linux/platform_data/cyttsp4.h deleted file mode 100644 index 5dc9d2be384b..000000000000 --- a/include/linux/platform_data/cyttsp4.h +++ /dev/null @@ -1,62 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Header file for: - * Cypress TrueTouch(TM) Standard Product (TTSP) touchscreen drivers. - * For use with Cypress Txx3xx parts. - * Supported parts include: - * CY8CTST341 - * CY8CTMA340 - * - * Copyright (C) 2009, 2010, 2011 Cypress Semiconductor, Inc. - * Copyright (C) 2012 Javier Martinez Canillas <javier@dowhile0.org> - * - * Contact Cypress Semiconductor at www.cypress.com (kev@cypress.com) - */ -#ifndef _CYTTSP4_H_ -#define _CYTTSP4_H_ - -#define CYTTSP4_MT_NAME "cyttsp4_mt" -#define CYTTSP4_I2C_NAME "cyttsp4_i2c_adapter" -#define CYTTSP4_SPI_NAME "cyttsp4_spi_adapter" - -#define CY_TOUCH_SETTINGS_MAX 32 - -struct touch_framework { - const uint16_t *abs; - uint8_t size; - uint8_t enable_vkeys; -} __packed; - -struct cyttsp4_mt_platform_data { - struct touch_framework *frmwrk; - unsigned short flags; - char const *inp_dev_name; -}; - -struct touch_settings { - const uint8_t *data; - uint32_t size; - uint8_t tag; -} __packed; - -struct cyttsp4_core_platform_data { - int irq_gpio; - int rst_gpio; - int level_irq_udelay; - int (*xres)(struct cyttsp4_core_platform_data *pdata, - struct device *dev); - int (*init)(struct cyttsp4_core_platform_data *pdata, - int on, struct device *dev); - int (*power)(struct cyttsp4_core_platform_data *pdata, - int on, struct device *dev, atomic_t *ignore_irq); - int (*irq_stat)(struct cyttsp4_core_platform_data *pdata, - struct device *dev); - struct touch_settings *sett[CY_TOUCH_SETTINGS_MAX]; -}; - -struct cyttsp4_platform_data { - struct cyttsp4_core_platform_data *core_pdata; - struct cyttsp4_mt_platform_data *mt_pdata; -}; - -#endif /* _CYTTSP4_H_ */ diff --git a/include/linux/platform_data/gpio-ath79.h b/include/linux/platform_data/gpio-ath79.h deleted file mode 100644 index 3ea6dd942c27..000000000000 --- a/include/linux/platform_data/gpio-ath79.h +++ /dev/null @@ -1,16 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Atheros AR7XXX/AR9XXX GPIO controller platform data - * - * Copyright (C) 2015 Alban Bedel <albeu@free.fr> - */ - -#ifndef __LINUX_PLATFORM_DATA_GPIO_ATH79_H -#define __LINUX_PLATFORM_DATA_GPIO_ATH79_H - -struct ath79_gpio_platform_data { - unsigned ngpios; - bool oe_inverted; -}; - -#endif diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h deleted file mode 100644 index b82e44662efe..000000000000 --- a/include/linux/platform_data/gpio-davinci.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * DaVinci GPIO Platform Related Defines - * - * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com/ - */ - -#ifndef __DAVINCI_GPIO_PLATFORM_H -#define 
__DAVINCI_GPIO_PLATFORM_H - -struct davinci_gpio_platform_data { - bool no_auto_base; - u32 base; - u32 ngpio; - u32 gpio_unbanked; -}; - -/* Convert GPIO signal to GPIO pin number */ -#define GPIO_TO_PIN(bank, gpio) (16 * (bank) + (gpio)) - -#endif diff --git a/include/linux/platform_data/keypad-nomadik-ske.h b/include/linux/platform_data/keypad-nomadik-ske.h deleted file mode 100644 index 7efabbca1dca..000000000000 --- a/include/linux/platform_data/keypad-nomadik-ske.h +++ /dev/null @@ -1,50 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Copyright (C) ST-Ericsson SA 2010 - * - * Author: Naveen Kumar Gaddipati <naveen.gaddipati@stericsson.com> - * - * ux500 Scroll key and Keypad Encoder (SKE) header - */ - -#ifndef __SKE_H -#define __SKE_H - -#include <linux/input/matrix_keypad.h> - -/* register definitions for SKE peripheral */ -#define SKE_CR 0x00 -#define SKE_VAL0 0x04 -#define SKE_VAL1 0x08 -#define SKE_DBCR 0x0C -#define SKE_IMSC 0x10 -#define SKE_RIS 0x14 -#define SKE_MIS 0x18 -#define SKE_ICR 0x1C - -/* - * Keypad module - */ - -/** - * struct keypad_platform_data - structure for platform specific data - * @init: pointer to keypad init function - * @exit: pointer to keypad deinitialisation function - * @keymap_data: matrix scan code table for keycodes - * @krow: maximum number of rows - * @kcol: maximum number of columns - * @debounce_ms: platform specific debounce time - * @no_autorepeat: flag for auto repetition - * @wakeup_enable: allow waking up the system - */ -struct ske_keypad_platform_data { - int (*init)(void); - int (*exit)(void); - const struct matrix_keymap_data *keymap_data; - u8 krow; - u8 kcol; - u8 debounce_ms; - bool no_autorepeat; - bool wakeup_enable; -}; -#endif /*__SKE_KPD_H*/ diff --git a/include/linux/platform_data/max6697.h b/include/linux/platform_data/max6697.h deleted file mode 100644 index 6fbb70005541..000000000000 --- a/include/linux/platform_data/max6697.h +++ /dev/null @@ -1,33 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * max6697.h - * Copyright (c) 2012 Guenter Roeck <linux@roeck-us.net> - */ - -#ifndef MAX6697_H -#define MAX6697_H - -#include <linux/types.h> - -/* - * For all bit masks: - * bit 0: local temperature - * bit 1..7: remote temperatures - */ -struct max6697_platform_data { - bool smbus_timeout_disable; /* set to disable SMBus timeouts */ - bool extended_range_enable; /* set to enable extended temp range */ - bool beta_compensation; /* set to enable beta compensation */ - u8 alert_mask; /* set bit to 1 to disable alert */ - u8 over_temperature_mask; /* set bit to 1 to disable */ - u8 resistance_cancellation; /* set bit to 0 to disable - * bit mask for MAX6581, - * boolean for other chips - */ - u8 ideality_mask; /* set bit to 0 to disable */ - u8 ideality_value; /* transistor ideality as per - * MAX6581 datasheet - */ -}; - -#endif /* MAX6697_H */ diff --git a/include/linux/platform_data/mcs.h b/include/linux/platform_data/mcs.h deleted file mode 100644 index fcc6f2a1f5c3..000000000000 --- a/include/linux/platform_data/mcs.h +++ /dev/null @@ -1,30 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * Copyright (C) 2009 - 2010 Samsung Electronics Co.Ltd - * Author: Joonyoung Shim <jy0922.shim@samsung.com> - * Author: HeungJun Kim <riverful.kim@samsung.com> - */ - -#ifndef __LINUX_MCS_H -#define __LINUX_MCS_H - -#define MCS_KEY_MAP(v, c) ((((v) & 0xff) << 16) | ((c) & 0xffff)) -#define MCS_KEY_VAL(v) (((v) >> 16) & 0xff) -#define MCS_KEY_CODE(v) ((v) & 0xffff) - -struct mcs_platform_data { - void 
(*poweron)(bool); - void (*cfg_pin)(void); - - /* touchscreen */ - unsigned int x_size; - unsigned int y_size; - - /* touchkey */ - const u32 *keymap; - unsigned int keymap_size; - unsigned int key_maxval; - bool no_autorepeat; -}; - -#endif /* __LINUX_MCS_H */ diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h index 8c659db4da6b..2ee1a679e592 100644 --- a/include/linux/platform_data/microchip-ksz.h +++ b/include/linux/platform_data/microchip-ksz.h @@ -27,7 +27,9 @@ enum ksz_chip_id { KSZ8795_CHIP_ID = 0x8795, KSZ8794_CHIP_ID = 0x8794, KSZ8765_CHIP_ID = 0x8765, - KSZ8830_CHIP_ID = 0x8830, + KSZ88X3_CHIP_ID = 0x8830, + KSZ8864_CHIP_ID = 0x8864, + KSZ8895_CHIP_ID = 0x8895, KSZ9477_CHIP_ID = 0x00947700, KSZ9896_CHIP_ID = 0x00989600, KSZ9897_CHIP_ID = 0x00989700, diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h deleted file mode 100644 index a49826214a39..000000000000 --- a/include/linux/platform_data/mtd-davinci-aemif.h +++ /dev/null @@ -1,36 +0,0 @@ -/* - * TI DaVinci AEMIF support - * - * Copyright 2010 (C) Texas Instruments, Inc. https://www.ti.com/ - * - * This file is licensed under the terms of the GNU General Public License - * version 2. This program is licensed "as is" without any warranty of any - * kind, whether express or implied. - */ -#ifndef _MACH_DAVINCI_AEMIF_H -#define _MACH_DAVINCI_AEMIF_H - -#include <linux/platform_device.h> - -#define NRCSR_OFFSET 0x00 -#define AWCCR_OFFSET 0x04 -#define A1CR_OFFSET 0x10 - -#define ACR_ASIZE_MASK 0x3 -#define ACR_EW_MASK BIT(30) -#define ACR_SS_MASK BIT(31) - -/* All timings in nanoseconds */ -struct davinci_aemif_timing { - u8 wsetup; - u8 wstrobe; - u8 whold; - - u8 rsetup; - u8 rstrobe; - u8 rhold; - - u8 ta; -}; - -#endif diff --git a/include/linux/platform_data/mtd-davinci.h b/include/linux/platform_data/mtd-davinci.h deleted file mode 100644 index dd474dd44848..000000000000 --- a/include/linux/platform_data/mtd-davinci.h +++ /dev/null @@ -1,88 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * mach-davinci/nand.h - * - * Copyright © 2006 Texas Instruments. - * - * Ported to 2.6.23 Copyright © 2008 by - * Sander Huijsen <Shuijsen@optelecom-nkf.com> - * Troy Kisky <troy.kisky@boundarydevices.com> - * Dirk Behme <Dirk.Behme@gmail.com> - * - * -------------------------------------------------------------------------- - */ - -#ifndef __ARCH_ARM_DAVINCI_NAND_H -#define __ARCH_ARM_DAVINCI_NAND_H - -#include <linux/mtd/rawnand.h> - -#define NANDFCR_OFFSET 0x60 -#define NANDFSR_OFFSET 0x64 -#define NANDF1ECC_OFFSET 0x70 - -/* 4-bit ECC syndrome registers */ -#define NAND_4BIT_ECC_LOAD_OFFSET 0xbc -#define NAND_4BIT_ECC1_OFFSET 0xc0 -#define NAND_4BIT_ECC2_OFFSET 0xc4 -#define NAND_4BIT_ECC3_OFFSET 0xc8 -#define NAND_4BIT_ECC4_OFFSET 0xcc -#define NAND_ERR_ADD1_OFFSET 0xd0 -#define NAND_ERR_ADD2_OFFSET 0xd4 -#define NAND_ERR_ERRVAL1_OFFSET 0xd8 -#define NAND_ERR_ERRVAL2_OFFSET 0xdc - -/* NOTE: boards don't need to use these address bits - * for ALE/CLE unless they support booting from NAND. - * They're used unless platform data overrides them. - */ -#define MASK_ALE 0x08 -#define MASK_CLE 0x10 - -struct davinci_nand_pdata { /* platform_data */ - uint32_t mask_ale; - uint32_t mask_cle; - - /* - * 0-indexed chip-select number of the asynchronous - * interface to which the NAND device has been connected. - * - * So, if you have NAND connected to CS3 of DA850, you - * will pass '1' here. 
Since the asynchronous interface - * on DA850 starts from CS2. - */ - uint32_t core_chipsel; - - /* for packages using two chipselects */ - uint32_t mask_chipsel; - - /* board's default static partition info */ - struct mtd_partition *parts; - unsigned nr_parts; - - /* none == NAND_ECC_ENGINE_TYPE_NONE (strongly *not* advised!!) - * soft == NAND_ECC_ENGINE_TYPE_SOFT - * else == NAND_ECC_ENGINE_TYPE_ON_HOST, according to ecc_bits - * - * All DaVinci-family chips support 1-bit hardware ECC. - * Newer ones also support 4-bit ECC, but are awkward - * using it with large page chips. - */ - enum nand_ecc_engine_type engine_type; - enum nand_ecc_placement ecc_placement; - u8 ecc_bits; - - /* e.g. NAND_BUSWIDTH_16 */ - unsigned options; - /* e.g. NAND_BBT_USE_FLASH */ - unsigned bbt_options; - - /* Main and mirror bbt descriptor overrides */ - struct nand_bbt_descr *bbt_td; - struct nand_bbt_descr *bbt_md; - - /* Access timings */ - struct davinci_aemif_timing *timing; -}; - -#endif /* __ARCH_ARM_DAVINCI_NAND_H */ diff --git a/include/linux/platform_data/ti-aemif.h b/include/linux/platform_data/ti-aemif.h deleted file mode 100644 index 77625251df07..000000000000 --- a/include/linux/platform_data/ti-aemif.h +++ /dev/null @@ -1,45 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * TI DaVinci AEMIF platform glue. - * - * Copyright (C) 2017 BayLibre SAS - * - * Author: - * Bartosz Golaszewski <bgolaszewski@baylibre.com> - */ - -#ifndef __TI_DAVINCI_AEMIF_DATA_H__ -#define __TI_DAVINCI_AEMIF_DATA_H__ - -#include <linux/of_platform.h> - -/** - * struct aemif_abus_data - Async bus configuration parameters. - * - * @cs - Chip-select number. - */ -struct aemif_abus_data { - u32 cs; -}; - -/** - * struct aemif_platform_data - Data to set up the TI aemif driver. - * - * @dev_lookup: of_dev_auxdata passed to of_platform_populate() for aemif - * subdevices. - * @cs_offset: Lowest allowed chip-select number. - * @abus_data: Array of async bus configuration entries. - * @num_abus_data: Number of abus entries. - * @sub_devices: Array of platform subdevices. - * @num_sub_devices: Number of subdevices. - */ -struct aemif_platform_data { - struct of_dev_auxdata *dev_lookup; - u32 cs_offset; - struct aemif_abus_data *abus_data; - size_t num_abus_data; - struct platform_device *sub_devices; - size_t num_sub_devices; -}; - -#endif /* __TI_DAVINCI_AEMIF_DATA_H__ */ diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h index 0aeeae1c1943..365e119bebaa 100644 --- a/include/linux/platform_data/x86/asus-wmi.h +++ b/include/linux/platform_data/x86/asus-wmi.h @@ -4,6 +4,7 @@ #include <linux/errno.h> #include <linux/types.h> +#include <linux/dmi.h> /* WMI Methods */ #define ASUS_WMI_METHODID_SPEC 0x43455053 /* BIOS SPECification */ @@ -62,12 +63,14 @@ #define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021 #define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? 
*/ #define ASUS_WMI_DEVID_LIGHTBAR 0x00050025 +#define ASUS_WMI_DEVID_OOBE 0x0005002F /* This can only be used to disable the screen, not re-enable */ #define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031 /* Writing a brightness re-enables the screen if disabled */ #define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032 #define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018 #define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075 +#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY_VIVO 0x00110019 /* Misc */ #define ASUS_WMI_DEVID_PANEL_OD 0x00050019 @@ -164,4 +167,39 @@ static inline int asus_wmi_evaluate_method(u32 method_id, u32 arg0, u32 arg1, } #endif +/* To be used by both hid-asus and asus-wmi to determine which controls kbd_brightness */ +static const struct dmi_system_id asus_use_hid_led_dmi_ids[] = { + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Zephyrus"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Strix"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_PRODUCT_FAMILY, "ROG Flow"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GA403U"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "GU605M"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "RC71L"), + }, + }, + { }, +}; + #endif /* __PLATFORM_DATA_X86_ASUS_WMI_H */ diff --git a/include/linux/platform_data/intel-mid_wdt.h b/include/linux/platform_data/x86/intel-mid_wdt.h index 8dba70b4b020..e5c0210d0fec 100644 --- a/include/linux/platform_data/intel-mid_wdt.h +++ b/include/linux/platform_data/x86/intel-mid_wdt.h @@ -6,8 +6,8 @@ * Contact: David Cohen <david.a.cohen@linux.intel.com> */ -#ifndef __INTEL_MID_WDT_H__ -#define __INTEL_MID_WDT_H__ +#ifndef __PLATFORM_X86_INTEL_MID_WDT_H_ +#define __PLATFORM_X86_INTEL_MID_WDT_H_ #include <linux/platform_device.h> @@ -16,4 +16,4 @@ struct intel_mid_wdt_pdata { int (*probe)(struct platform_device *pdev); }; -#endif /*__INTEL_MID_WDT_H__*/ +#endif /* __PLATFORM_X86_INTEL_MID_WDT_H_ */ diff --git a/include/linux/platform_data/x86/intel_scu_ipc.h b/include/linux/platform_data/x86/intel_scu_ipc.h new file mode 100644 index 000000000000..0ca9962e97f2 --- /dev/null +++ b/include/linux/platform_data/x86/intel_scu_ipc.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __PLATFORM_X86_INTEL_SCU_IPC_H_ +#define __PLATFORM_X86_INTEL_SCU_IPC_H_ + +#include <linux/ioport.h> + +struct device; +struct intel_scu_ipc_dev; + +/** + * struct intel_scu_ipc_data - Data used to configure SCU IPC + * @mem: Base address of SCU IPC MMIO registers + * @irq: The IRQ number used for SCU (optional) + */ +struct intel_scu_ipc_data { + struct resource mem; + int irq; +}; + +struct intel_scu_ipc_dev * +__intel_scu_ipc_register(struct device *parent, + const struct intel_scu_ipc_data *scu_data, + struct module *owner); + +#define intel_scu_ipc_register(parent, scu_data) \ + __intel_scu_ipc_register(parent, scu_data, THIS_MODULE) + +void intel_scu_ipc_unregister(struct intel_scu_ipc_dev *scu); + +struct intel_scu_ipc_dev * +__devm_intel_scu_ipc_register(struct device *parent, + const struct intel_scu_ipc_data *scu_data, + struct module *owner); + +#define devm_intel_scu_ipc_register(parent, scu_data) \ + __devm_intel_scu_ipc_register(parent, scu_data, THIS_MODULE) + +struct intel_scu_ipc_dev *intel_scu_ipc_dev_get(void); +void intel_scu_ipc_dev_put(struct intel_scu_ipc_dev *scu); +struct intel_scu_ipc_dev *devm_intel_scu_ipc_dev_get(struct device *dev); + +int intel_scu_ipc_dev_ioread8(struct intel_scu_ipc_dev *scu, u16 addr, + u8 *data); +int 
intel_scu_ipc_dev_iowrite8(struct intel_scu_ipc_dev *scu, u16 addr, + u8 data); +int intel_scu_ipc_dev_readv(struct intel_scu_ipc_dev *scu, u16 *addr, + u8 *data, size_t len); +int intel_scu_ipc_dev_writev(struct intel_scu_ipc_dev *scu, u16 *addr, + u8 *data, size_t len); + +int intel_scu_ipc_dev_update(struct intel_scu_ipc_dev *scu, u16 addr, + u8 data, u8 mask); + +int intel_scu_ipc_dev_simple_command(struct intel_scu_ipc_dev *scu, int cmd, + int sub); +int intel_scu_ipc_dev_command_with_size(struct intel_scu_ipc_dev *scu, int cmd, + int sub, const void *in, size_t inlen, + size_t size, void *out, size_t outlen); + +static inline int intel_scu_ipc_dev_command(struct intel_scu_ipc_dev *scu, int cmd, + int sub, const void *in, size_t inlen, + void *out, size_t outlen) +{ + return intel_scu_ipc_dev_command_with_size(scu, cmd, sub, in, inlen, + inlen, out, outlen); +} + +#endif diff --git a/include/linux/platform_data/zforce_ts.h b/include/linux/platform_data/zforce_ts.h deleted file mode 100644 index 2463a4a856a6..000000000000 --- a/include/linux/platform_data/zforce_ts.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* drivers/input/touchscreen/zforce.c - * - * Copyright (C) 2012-2013 MundoReader S.L. - */ - -#ifndef _LINUX_INPUT_ZFORCE_TS_H -#define _LINUX_INPUT_ZFORCE_TS_H - -struct zforce_ts_platdata { - unsigned int x_max; - unsigned int y_max; -}; - -#endif /* _LINUX_INPUT_ZFORCE_TS_H */ diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 858c8e7851fb..b637ec14025f 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -198,8 +198,11 @@ struct generic_pm_domain { spinlock_t slock; unsigned long lock_flags; }; + struct { + raw_spinlock_t raw_slock; + unsigned long raw_lock_flags; + }; }; - }; static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) @@ -473,6 +476,9 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev, int dev_pm_domain_attach_list(struct device *dev, const struct dev_pm_domain_attach_data *data, struct dev_pm_domain_list **list); +int devm_pm_domain_attach_list(struct device *dev, + const struct dev_pm_domain_attach_data *data, + struct dev_pm_domain_list **list); void dev_pm_domain_detach(struct device *dev, bool power_off); void dev_pm_domain_detach_list(struct dev_pm_domain_list *list); int dev_pm_domain_start(struct device *dev); @@ -499,6 +505,14 @@ static inline int dev_pm_domain_attach_list(struct device *dev, { return 0; } + +static inline int devm_pm_domain_attach_list(struct device *dev, + const struct dev_pm_domain_attach_data *data, + struct dev_pm_domain_list **list) +{ + return 0; +} + static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {} static inline void dev_pm_domain_detach_list(struct dev_pm_domain_list *list) {} static inline int dev_pm_domain_start(struct device *dev) diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index dc7b738de299..453691710839 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h @@ -158,7 +158,7 @@ static inline void posix_cputimers_init_work(void) { } * @rcu: RCU head for freeing the timer. 
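As a usage sketch for the relocated intel_scu_ipc.h interface above (hypothetical names, schematic error handling; both NULL and ERR_PTR failures from the devm registration helper are handled since the header does not spell out which is returned):

#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_data/x86/intel_scu_ipc.h>

/* Provider side: a parent driver describes the SCU mailbox it owns. */
static int example_scu_provider_setup(struct device *parent,
				      struct resource *mmio, int irq)
{
	struct intel_scu_ipc_data data = {
		.mem = *mmio,	/* MMIO window holding the SCU IPC registers */
		.irq = irq,	/* optional, 0 if unused */
	};
	struct intel_scu_ipc_dev *scu;

	scu = devm_intel_scu_ipc_register(parent, &data);
	if (IS_ERR_OR_NULL(scu))
		return scu ? PTR_ERR(scu) : -ENOMEM;
	return 0;
}

/* Consumer side: read one SCU-owned register once the instance exists. */
static int example_scu_consumer_read(struct device *dev, u16 addr, u8 *val)
{
	struct intel_scu_ipc_dev *scu = devm_intel_scu_ipc_dev_get(dev);

	if (!scu)
		return -EPROBE_DEFER;	/* provider not registered yet */

	return intel_scu_ipc_dev_ioread8(scu, addr, val);
}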
*/ struct k_itimer { - struct list_head list; + struct hlist_node list; struct hlist_node t_hash; spinlock_t it_lock; const struct k_clock *kclock; diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 72dc7e45c90c..910d407ebe63 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -243,8 +243,7 @@ struct power_supply_desc { const char *name; enum power_supply_type type; u8 charge_behaviours; - const enum power_supply_usb_type *usb_types; - size_t num_usb_types; + u32 usb_types; const enum power_supply_property *properties; size_t num_properties; diff --git a/include/linux/printk.h b/include/linux/printk.h index b937cefcb31c..eca9bb2ee637 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h @@ -9,6 +9,8 @@ #include <linux/ratelimit_types.h> #include <linux/once_lite.h> +struct console; + extern const char linux_banner[]; extern const char linux_proc_banner[]; @@ -161,15 +163,16 @@ int _printk(const char *fmt, ...); */ __printf(1, 2) __cold int _printk_deferred(const char *fmt, ...); -extern void __printk_safe_enter(void); -extern void __printk_safe_exit(void); +extern void __printk_deferred_enter(void); +extern void __printk_deferred_exit(void); + /* * The printk_deferred_enter/exit macros are available only as a hack for * some code paths that need to defer all printk console printing. Interrupts * must be disabled for the deferred duration. */ -#define printk_deferred_enter __printk_safe_enter -#define printk_deferred_exit __printk_safe_exit +#define printk_deferred_enter() __printk_deferred_enter() +#define printk_deferred_exit() __printk_deferred_exit() /* * Please don't use printk_ratelimit(), because it shares ratelimiting state @@ -197,6 +200,10 @@ extern asmlinkage void dump_stack_lvl(const char *log_lvl) __cold; extern asmlinkage void dump_stack(void) __cold; void printk_trigger_flush(void); void console_try_replay_all(void); +void printk_legacy_allow_panic_sync(void); +extern bool nbcon_device_try_acquire(struct console *con); +extern void nbcon_device_release(struct console *con); +void nbcon_atomic_flush_unsafe(void); #else static inline __printf(1, 0) int vprintk(const char *s, va_list args) @@ -279,6 +286,24 @@ static inline void printk_trigger_flush(void) static inline void console_try_replay_all(void) { } + +static inline void printk_legacy_allow_panic_sync(void) +{ +} + +static inline bool nbcon_device_try_acquire(struct console *con) +{ + return false; +} + +static inline void nbcon_device_release(struct console *con) +{ +} + +static inline void nbcon_atomic_flush_unsafe(void) +{ +} + #endif bool this_cpu_in_panic(void); diff --git a/include/linux/prmt.h b/include/linux/prmt.h index 24da8364b919..9c094294403f 100644 --- a/include/linux/prmt.h +++ b/include/linux/prmt.h @@ -2,6 +2,11 @@ #ifdef CONFIG_ACPI_PRMT void init_prmt(void); +int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer); #else static inline void init_prmt(void) { } +static inline int acpi_call_prm_handler(guid_t handler_guid, void *param_buffer) +{ + return -EOPNOTSUPP; +} #endif diff --git a/include/linux/pstore.h b/include/linux/pstore.h index 638507a3c8ff..fed601053c51 100644 --- a/include/linux/pstore.h +++ b/include/linux/pstore.h @@ -182,7 +182,7 @@ struct pstore_info { struct module *owner; const char *name; - spinlock_t buf_lock; + raw_spinlock_t buf_lock; char *buf; size_t bufsize; diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h index 6e4b8206c7d0..c892d22ce0a7 100644 --- 
a/include/linux/ptp_clock_kernel.h +++ b/include/linux/ptp_clock_kernel.h @@ -47,10 +47,12 @@ struct system_device_crosststamp; * struct ptp_system_timestamp - system time corresponding to a PHC timestamp * @pre_ts: system timestamp before capturing PHC * @post_ts: system timestamp after capturing PHC + * @clockid: clock-base used for capturing the system timestamps */ struct ptp_system_timestamp { struct timespec64 pre_ts; struct timespec64 post_ts; + clockid_t clockid; }; /** @@ -457,14 +459,40 @@ static inline ktime_t ptp_convert_timestamp(const ktime_t *hwtstamp, static inline void ptp_read_system_prets(struct ptp_system_timestamp *sts) { - if (sts) - ktime_get_real_ts64(&sts->pre_ts); + if (sts) { + switch (sts->clockid) { + case CLOCK_REALTIME: + ktime_get_real_ts64(&sts->pre_ts); + break; + case CLOCK_MONOTONIC: + ktime_get_ts64(&sts->pre_ts); + break; + case CLOCK_MONOTONIC_RAW: + ktime_get_raw_ts64(&sts->pre_ts); + break; + default: + break; + } + } } static inline void ptp_read_system_postts(struct ptp_system_timestamp *sts) { - if (sts) - ktime_get_real_ts64(&sts->post_ts); + if (sts) { + switch (sts->clockid) { + case CLOCK_REALTIME: + ktime_get_real_ts64(&sts->post_ts); + break; + case CLOCK_MONOTONIC: + ktime_get_ts64(&sts->post_ts); + break; + case CLOCK_MONOTONIC_RAW: + ktime_get_raw_ts64(&sts->post_ts); + break; + default: + break; + } + } } #endif diff --git a/include/linux/pwm.h b/include/linux/pwm.h index f8c2dc12dbd3..8acd60b53f58 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -394,9 +394,6 @@ static inline bool pwm_might_sleep(struct pwm_device *pwm) } /* PWM provider APIs */ -int pwm_capture(struct pwm_device *pwm, struct pwm_capture *result, - unsigned long timeout); - void pwmchip_put(struct pwm_chip *chip); struct pwm_chip *pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv); struct pwm_chip *devm_pwmchip_alloc(struct device *parent, unsigned int npwm, size_t sizeof_priv); @@ -462,13 +459,6 @@ static inline void pwm_disable(struct pwm_device *pwm) might_sleep(); } -static inline int pwm_capture(struct pwm_device *pwm, - struct pwm_capture *result, - unsigned long timeout) -{ - return -EINVAL; -} - static inline void pwmchip_put(struct pwm_chip *chip) { } diff --git a/include/linux/quota.h b/include/linux/quota.h index 07071e64abf3..89a0d83ddad0 100644 --- a/include/linux/quota.h +++ b/include/linux/quota.h @@ -526,7 +526,7 @@ struct quota_info { const struct quota_format_ops *ops[MAXQUOTAS]; /* Operations for each type */ }; -int register_quota_format(struct quota_format_type *fmt); +void register_quota_format(struct quota_format_type *fmt); void unregister_quota_format(struct quota_format_type *fmt); struct quota_module_name { diff --git a/include/linux/ratelimit_types.h b/include/linux/ratelimit_types.h index 002266693e50..765232ce0b5e 100644 --- a/include/linux/ratelimit_types.h +++ b/include/linux/ratelimit_types.h @@ -19,8 +19,8 @@ struct ratelimit_state { int burst; int printed; int missed; + unsigned int flags; unsigned long begin; - unsigned long flags; }; #define RATELIMIT_STATE_INIT_FLAGS(name, interval_init, burst_init, flags_init) { \ diff --git a/include/linux/rbtree.h b/include/linux/rbtree.h index f7edca369eda..7c173aa64e1e 100644 --- a/include/linux/rbtree.h +++ b/include/linux/rbtree.h @@ -245,6 +245,42 @@ rb_find_add(struct rb_node *node, struct rb_root *tree, } /** + * rb_find_add_rcu() - find equivalent @node in @tree, or add @node + * @node: node to look-for / insert + * @tree: tree to search / modify + * 
@cmp: operator defining the node order + * + * Adds a Store-Release for link_node. + * + * Returns the rb_node matching @node, or NULL when no match is found and @node + * is inserted. + */ +static __always_inline struct rb_node * +rb_find_add_rcu(struct rb_node *node, struct rb_root *tree, + int (*cmp)(struct rb_node *, const struct rb_node *)) +{ + struct rb_node **link = &tree->rb_node; + struct rb_node *parent = NULL; + int c; + + while (*link) { + parent = *link; + c = cmp(node, parent); + + if (c < 0) + link = &parent->rb_left; + else if (c > 0) + link = &parent->rb_right; + else + return parent; + } + + rb_link_node_rcu(node, parent, link); + rb_insert_color(node, tree); + return NULL; +} + +/** * rb_find() - find @key in tree @tree * @key: key to match * @tree: tree to search @@ -273,6 +309,37 @@ rb_find(const void *key, const struct rb_root *tree, } /** + * rb_find_rcu() - find @key in tree @tree + * @key: key to match + * @tree: tree to search + * @cmp: operator defining the node order + * + * Notably, tree descent vs concurrent tree rotations is unsound and can result + * in false-negatives. + * + * Returns the rb_node matching @key or NULL. + */ +static __always_inline struct rb_node * +rb_find_rcu(const void *key, const struct rb_root *tree, + int (*cmp)(const void *key, const struct rb_node *)) +{ + struct rb_node *node = tree->rb_node; + + while (node) { + int c = cmp(key, node); + + if (c < 0) + node = rcu_dereference_raw(node->rb_left); + else if (c > 0) + node = rcu_dereference_raw(node->rb_right); + else + return node; + } + + return NULL; +} + +/** * rb_find_first() - find the first @key in @tree * @key: key to match * @tree: tree to search diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index ba95c06675e1..2fdc2208f1ca 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h @@ -185,11 +185,7 @@ struct rcu_cblist { * ---------------------------------------------------------------------------- */ #define SEGCBLIST_ENABLED BIT(0) -#define SEGCBLIST_RCU_CORE BIT(1) -#define SEGCBLIST_LOCKING BIT(2) -#define SEGCBLIST_KTHREAD_CB BIT(3) -#define SEGCBLIST_KTHREAD_GP BIT(4) -#define SEGCBLIST_OFFLOADED BIT(5) +#define SEGCBLIST_OFFLOADED BIT(1) struct rcu_segcblist { struct rcu_head *head; diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 3dc1e58865f7..14dfa6008467 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -191,7 +191,10 @@ static inline void hlist_del_init_rcu(struct hlist_node *n) * @old : the element to be replaced * @new : the new element to insert * - * The @old entry will be replaced with the @new entry atomically. + * The @old entry will be replaced with the @new entry atomically from + * the perspective of concurrent readers. It is the caller's responsibility + * to synchronize with concurrent updaters, if any. + * * Note: @old should not be empty. */ static inline void list_replace_rcu(struct list_head *old, @@ -519,7 +522,9 @@ static inline void hlist_del_rcu(struct hlist_node *n) * @old : the element to be replaced * @new : the new element to insert * - * The @old entry will be replaced with the @new entry atomically. + * The @old entry will be replaced with the @new entry atomically from + * the perspective of concurrent readers. It is the caller's responsibility + * to synchronize with concurrent updaters, if any. 
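A hedged sketch of how the new rb_find_add_rcu()/rb_find_rcu() pair is meant to be combined (the struct, lock and key type are made up; per the caveat above, a reader-side miss may need to be confirmed under the writer lock because concurrent rotations can cause false negatives):

#include <linux/rbtree.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_obj {
	struct rb_node node;
	unsigned long key;
};

static struct rb_root example_tree = RB_ROOT;
static DEFINE_SPINLOCK(example_lock);		/* serializes writers only */

static int example_cmp_nodes(struct rb_node *a, const struct rb_node *b)
{
	unsigned long ka = rb_entry(a, struct example_obj, node)->key;
	unsigned long kb = rb_entry(b, struct example_obj, node)->key;

	if (ka < kb)
		return -1;
	return ka > kb;
}

static int example_cmp_key(const void *key, const struct rb_node *n)
{
	unsigned long k = *(const unsigned long *)key;
	unsigned long nk = rb_entry(n, struct example_obj, node)->key;

	if (k < nk)
		return -1;
	return k > nk;
}

/* Writer: publish @obj unless an equal key is already present. */
static struct example_obj *example_insert(struct example_obj *obj)
{
	struct rb_node *old;

	spin_lock(&example_lock);
	old = rb_find_add_rcu(&obj->node, &example_tree, example_cmp_nodes);
	spin_unlock(&example_lock);

	return old ? rb_entry(old, struct example_obj, node) : NULL;
}

/* Reader: lockless lookup; caller holds rcu_read_lock(). */
static struct example_obj *example_lookup(unsigned long key)
{
	struct rb_node *n = rb_find_rcu(&key, &example_tree, example_cmp_key);

	return n ? rb_entry(n, struct example_obj, node) : NULL;
}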
*/ static inline void hlist_replace_rcu(struct hlist_node *old, struct hlist_node *new) diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 13f6f00aecf9..58d84c59f3dd 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h @@ -34,10 +34,12 @@ #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) +#define RCU_SEQ_CTR_SHIFT 2 +#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1) + /* Exported common interfaces */ void call_rcu(struct rcu_head *head, rcu_callback_t func); void rcu_barrier_tasks(void); -void rcu_barrier_tasks_rude(void); void synchronize_rcu(void); struct rcu_gp_oldstate; @@ -144,11 +146,18 @@ void rcu_init_nohz(void); int rcu_nocb_cpu_offload(int cpu); int rcu_nocb_cpu_deoffload(int cpu); void rcu_nocb_flush_deferred_wakeup(void); + +#define RCU_NOCB_LOCKDEP_WARN(c, s) RCU_LOCKDEP_WARN(c, s) + #else /* #ifdef CONFIG_RCU_NOCB_CPU */ + static inline void rcu_init_nohz(void) { } static inline int rcu_nocb_cpu_offload(int cpu) { return -EINVAL; } static inline int rcu_nocb_cpu_deoffload(int cpu) { return 0; } static inline void rcu_nocb_flush_deferred_wakeup(void) { } + +#define RCU_NOCB_LOCKDEP_WARN(c, s) + #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ /* @@ -165,6 +174,7 @@ static inline void rcu_nocb_flush_deferred_wakeup(void) { } } while (0) void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks(void); +void rcu_tasks_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_classic_qs(t, preempt) do { } while (0) # define call_rcu_tasks call_rcu @@ -191,6 +201,7 @@ void rcu_tasks_trace_qs_blkd(struct task_struct *t); rcu_tasks_trace_qs_blkd(t); \ } \ } while (0) +void rcu_tasks_trace_torture_stats_print(char *tt, char *tf); # else # define rcu_tasks_trace_qs(t) do { } while (0) # endif @@ -202,8 +213,8 @@ do { \ } while (0) # ifdef CONFIG_TASKS_RUDE_RCU -void call_rcu_tasks_rude(struct rcu_head *head, rcu_callback_t func); void synchronize_rcu_tasks_rude(void); +void rcu_tasks_rude_torture_stats_print(char *tt, char *tf); # endif #define rcu_note_voluntary_context_switch(t) rcu_tasks_qs(t, false) diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index d9ac7b136aea..0ee270b3f5ed 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h @@ -111,6 +111,11 @@ static inline void __kvfree_call_rcu(struct rcu_head *head, void *ptr) kvfree(ptr); } +static inline void kvfree_rcu_barrier(void) +{ + rcu_barrier(); +} + #ifdef CONFIG_KASAN_GENERIC void kvfree_call_rcu(struct rcu_head *head, void *ptr); #else @@ -158,7 +163,7 @@ void rcu_scheduler_starting(void); static inline void rcu_end_inkernel_boot(void) { } static inline bool rcu_inkernel_boot_has_ended(void) { return true; } static inline bool rcu_is_watching(void) { return true; } -static inline void rcu_momentary_dyntick_idle(void) { } +static inline void rcu_momentary_eqs(void) { } static inline void kfree_rcu_scheduler_running(void) { } static inline bool rcu_gp_might_be_stalled(void) { return false; } diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 254244202ea9..90a684f94776 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h @@ -35,9 +35,10 @@ static inline void rcu_virt_note_context_switch(void) void synchronize_rcu_expedited(void); void kvfree_call_rcu(struct rcu_head *head, void *ptr); +void kvfree_rcu_barrier(void); void rcu_barrier(void); -void rcu_momentary_dyntick_idle(void); +void rcu_momentary_eqs(void); void 
kfree_rcu_scheduler_running(void); bool rcu_gp_might_be_stalled(void); diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 122e38161acb..f9ccad32fc5c 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h @@ -1521,6 +1521,9 @@ struct regmap_irq_chip_data; * struct regmap_irq_chip - Description of a generic regmap irq_chip. * * @name: Descriptive name for IRQ controller. + * @domain_suffix: Name suffix to be appended to end of IRQ domain name. Needed + * when multiple regmap-IRQ controllers are created from same + * device. * * @main_status: Base main status register address. For chips which have * interrupts arranged in separate sub-irq blocks with own IRQ @@ -1606,6 +1609,7 @@ struct regmap_irq_chip_data; */ struct regmap_irq_chip { const char *name; + const char *domain_suffix; unsigned int main_status; unsigned int num_main_status_bits; diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 373003ace639..997b34197385 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h @@ -147,7 +147,8 @@ void rfkill_destroy(struct rfkill *rfkill); * Prefer to use rfkill_set_hw_state if you don't need any special reason. */ bool rfkill_set_hw_state_reason(struct rfkill *rfkill, - bool blocked, unsigned long reason); + bool blocked, + enum rfkill_hard_block_reasons reason); /** * rfkill_set_hw_state - Set the internal rfkill hardware block state * @rfkill: pointer to the rfkill class to modify. @@ -280,7 +281,7 @@ static inline void rfkill_destroy(struct rfkill *rfkill) static inline bool rfkill_set_hw_state_reason(struct rfkill *rfkill, bool blocked, - unsigned long reason) + enum rfkill_hard_block_reasons reason) { return blocked; } diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index fd35d4ec12e1..17fbb7855295 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -89,6 +89,14 @@ void ring_buffer_discard_commit(struct trace_buffer *buffer, struct trace_buffer * __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key); +struct trace_buffer *__ring_buffer_alloc_range(unsigned long size, unsigned flags, + int order, unsigned long start, + unsigned long range_size, + struct lock_class_key *key); + +bool ring_buffer_last_boot_delta(struct trace_buffer *buffer, long *text, + long *data); + /* * Because the ring buffer is generic, if other users of the ring buffer get * traced by ftrace, it can produce lockdep warnings. We need to keep each @@ -100,6 +108,18 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k __ring_buffer_alloc((size), (flags), &__key); \ }) +/* + * Because the ring buffer is generic, if other users of the ring buffer get + * traced by ftrace, it can produce lockdep warnings. We need to keep each + * ring buffer's lock class separate. 
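Circling back to the kvfree_rcu_barrier() addition a few hunks above (which simply maps to rcu_barrier() in the Tiny RCU build), a minimal hypothetical teardown pattern it enables:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_blob {
	struct rcu_head rcu;
	unsigned char payload[64];
};

static void example_blob_release(struct example_blob *b)
{
	/* Queue the object; it is freed after a grace period elapses. */
	kvfree_rcu(b, rcu);
}

static void example_module_teardown(void)
{
	/*
	 * Wait until every pending kvfree_rcu() request has been processed,
	 * so all objects queued above are actually freed before the module
	 * text and data go away.
	 */
	kvfree_rcu_barrier();
}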
+ */ +#define ring_buffer_alloc_range(size, flags, order, start, range_size) \ +({ \ + static struct lock_class_key __key; \ + __ring_buffer_alloc_range((size), (flags), (order), (start), \ + (range_size), &__key); \ +}) + typedef bool (*ring_buffer_cond_fn)(void *data); int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full, ring_buffer_cond_fn cond, void *data); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index 0978c64f49d8..d5e93e44322e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -331,7 +331,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio, switch (level) { case RMAP_LEVEL_PTE: if (!folio_test_large(folio)) { - atomic_inc(&page->_mapcount); + atomic_inc(&folio->_mapcount); break; } @@ -425,7 +425,7 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio, if (!folio_test_large(folio)) { if (PageAnonExclusive(page)) ClearPageAnonExclusive(page); - atomic_inc(&page->_mapcount); + atomic_inc(&folio->_mapcount); break; } @@ -745,7 +745,12 @@ int folio_mkclean(struct folio *); int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff, struct vm_area_struct *vma); -void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked); +enum rmp_flags { + RMP_LOCKED = 1 << 0, + RMP_USE_SHARED_ZEROPAGE = 1 << 1, +}; + +void remove_migration_ptes(struct folio *src, struct folio *dst, int flags); /* * rmap_walk_control: To control rmap traversing for specific needs diff --git a/include/linux/rpmb.h b/include/linux/rpmb.h new file mode 100644 index 000000000000..cccda73eea4d --- /dev/null +++ b/include/linux/rpmb.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2015-2019 Intel Corp. All rights reserved + * Copyright (C) 2021-2022 Linaro Ltd + */ +#ifndef __RPMB_H__ +#define __RPMB_H__ + +#include <linux/device.h> +#include <linux/types.h> + +/** + * enum rpmb_type - type of underlying storage technology + * + * @RPMB_TYPE_EMMC : emmc (JESD84-B50.1) + * @RPMB_TYPE_UFS : UFS (JESD220) + * @RPMB_TYPE_NVME : NVM Express + */ +enum rpmb_type { + RPMB_TYPE_EMMC, + RPMB_TYPE_UFS, + RPMB_TYPE_NVME, +}; + +/** + * struct rpmb_descr - RPMB description provided by the underlying block device + * + * @type : block device type + * @route_frames : routes frames to and from the RPMB device + * @dev_id : unique device identifier read from the hardware + * @dev_id_len : length of unique device identifier + * @reliable_wr_count: number of sectors that can be written in one access + * @capacity : capacity of the device in units of 128K + * + * @dev_id is intended to be used as input when deriving the authenticaion key. 
+ */ +struct rpmb_descr { + enum rpmb_type type; + int (*route_frames)(struct device *dev, u8 *req, unsigned int req_len, + u8 *resp, unsigned int resp_len); + u8 *dev_id; + size_t dev_id_len; + u16 reliable_wr_count; + u16 capacity; +}; + +/** + * struct rpmb_dev - device which can support RPMB partition + * + * @dev : device + * @id : device_id + * @list_node : linked list node + * @descr : RPMB description + */ +struct rpmb_dev { + struct device dev; + int id; + struct list_head list_node; + struct rpmb_descr descr; +}; + +#define to_rpmb_dev(x) container_of((x), struct rpmb_dev, dev) + +#if IS_ENABLED(CONFIG_RPMB) +struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev); +void rpmb_dev_put(struct rpmb_dev *rdev); +struct rpmb_dev *rpmb_dev_find_device(const void *data, + const struct rpmb_dev *start, + int (*match)(struct device *dev, + const void *data)); +int rpmb_interface_register(struct class_interface *intf); +void rpmb_interface_unregister(struct class_interface *intf); +struct rpmb_dev *rpmb_dev_register(struct device *dev, + struct rpmb_descr *descr); +int rpmb_dev_unregister(struct rpmb_dev *rdev); + +int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req, + unsigned int req_len, u8 *resp, unsigned int resp_len); + +#else +static inline struct rpmb_dev *rpmb_dev_get(struct rpmb_dev *rdev) +{ + return NULL; +} + +static inline void rpmb_dev_put(struct rpmb_dev *rdev) { } + +static inline struct rpmb_dev * +rpmb_dev_find_device(const void *data, const struct rpmb_dev *start, + int (*match)(struct device *dev, const void *data)) +{ + return NULL; +} + +static inline int rpmb_interface_register(struct class_interface *intf) +{ + return -EOPNOTSUPP; +} + +static inline void rpmb_interface_unregister(struct class_interface *intf) +{ +} + +static inline struct rpmb_dev * +rpmb_dev_register(struct device *dev, struct rpmb_descr *descr) +{ + return NULL; +} + +static inline int rpmb_dev_unregister(struct rpmb_dev *dev) +{ + return 0; +} + +static inline int rpmb_route_frames(struct rpmb_dev *rdev, u8 *req, + unsigned int req_len, u8 *resp, + unsigned int resp_len) +{ + return -EOPNOTSUPP; +} +#endif /* CONFIG_RPMB */ + +#endif /* __RPMB_H__ */ diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index c09cdcc99471..189140bf11fc 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -40,7 +40,7 @@ struct sbitmap_word { /** * @swap_lock: serializes simultaneous updates of ->word and ->cleared */ - spinlock_t swap_lock; + raw_spinlock_t swap_lock; } ____cacheline_aligned_in_smp; /** diff --git a/include/linux/sched.h b/include/linux/sched.h index f8d150343d42..e6ee4258169a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -82,6 +82,8 @@ struct task_group; struct task_struct; struct user_event_mm; +#include <linux/sched/ext.h> + /* * Task state bitmask. NOTE! These bits are also * encoded in fs/proc/array.c: get_task_state(). @@ -149,8 +151,9 @@ struct user_event_mm; * Special states are those that do not use the normal wait-loop pattern. See * the comment with set_special_state(). 
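A hedged sketch (not from the patch) of how a storage driver might publish its RPMB partition through this interface; the frame router is stubbed and all identifiers are hypothetical:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/rpmb.h>

/* Forward RPMB frames to the hardware; stubbed out for the sketch. */
static int example_route_frames(struct device *dev, u8 *req, unsigned int req_len,
				u8 *resp, unsigned int resp_len)
{
	return -EOPNOTSUPP;
}

static struct rpmb_dev *example_register_rpmb(struct device *dev,
					      u8 *cid, size_t cid_len)
{
	struct rpmb_descr descr = {
		.type = RPMB_TYPE_EMMC,
		.route_frames = example_route_frames,
		.dev_id = cid,		/* input when deriving the authentication key */
		.dev_id_len = cid_len,
		.reliable_wr_count = 1,	/* sectors per reliable write */
		.capacity = 1,		/* in units of 128K */
	};

	return rpmb_dev_register(dev, &descr);
}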
*/ -#define is_special_task_state(state) \ - ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) +#define is_special_task_state(state) \ + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \ + TASK_DEAD | TASK_FROZEN)) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP # define debug_normal_state_change(state_value) \ @@ -541,9 +544,14 @@ struct sched_entity { struct rb_node run_node; u64 deadline; u64 min_vruntime; + u64 min_slice; struct list_head group_node; - unsigned int on_rq; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + unsigned char custom_slice; + /* hole */ u64 exec_start; u64 sum_exec_runtime; @@ -639,12 +647,26 @@ struct sched_dl_entity { * * @dl_overrun tells if the task asked to be informed about runtime * overruns. + * + * @dl_server tells if this is a server entity. + * + * @dl_defer tells if this is a deferred or regular server. For + * now only defer server exists. + * + * @dl_defer_armed tells if the deferrable server is waiting + * for the replenishment timer to activate it. + * + * @dl_defer_running tells if the deferrable server is actually + * running, skipping the defer phase. */ unsigned int dl_throttled : 1; unsigned int dl_yielded : 1; unsigned int dl_non_contending : 1; unsigned int dl_overrun : 1; unsigned int dl_server : 1; + unsigned int dl_defer : 1; + unsigned int dl_defer_armed : 1; + unsigned int dl_defer_running : 1; /* * Bandwidth enforcement timer. Each -deadline task has its @@ -672,7 +694,7 @@ struct sched_dl_entity { */ struct rq *rq; dl_server_has_tasks_f server_has_tasks; - dl_server_pick_f server_pick; + dl_server_pick_f server_pick_task; #ifdef CONFIG_RT_MUTEXES /* @@ -810,6 +832,9 @@ struct task_struct { struct sched_rt_entity rt; struct sched_dl_entity dl; struct sched_dl_entity *dl_server; +#ifdef CONFIG_SCHED_CLASS_EXT + struct sched_ext_entity scx; +#endif const struct sched_class *sched_class; #ifdef CONFIG_SCHED_CORE @@ -1243,7 +1268,6 @@ struct task_struct { /* Sequence number to catch updates: */ seqcount_spinlock_t mems_allowed_seq; int cpuset_mem_spread_rotor; - int cpuset_slab_spread_rotor; #endif #ifdef CONFIG_CGROUPS /* Control Group info protected by css_set_lock: */ diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index df3aca89d4f5..3a912ab42bb5 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -10,16 +10,16 @@ #include <linux/sched.h> -#define MAX_DL_PRIO 0 - -static inline int dl_prio(int prio) +static inline bool dl_prio(int prio) { - if (unlikely(prio < MAX_DL_PRIO)) - return 1; - return 0; + return unlikely(prio < MAX_DL_PRIO); } -static inline int dl_task(struct task_struct *p) +/* + * Returns true if a task has a priority that belongs to DL class. PI-boosted + * tasks will return true. Use dl_policy() to ignore PI-boosted tasks. + */ +static inline bool dl_task(struct task_struct *p) { return dl_prio(p->prio); } diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h new file mode 100644 index 000000000000..1ddbde64a31b --- /dev/null +++ b/include/linux/sched/ext.h @@ -0,0 +1,215 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * BPF extensible scheduler class: Documentation/scheduler/sched-ext.rst + * + * Copyright (c) 2022 Meta Platforms, Inc. and affiliates. 
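To make the PI-boosting caveat in the dl_task() comment above concrete, a small hypothetical check; dl_policy() itself lives in kernel/sched, so this sketch open-codes the policy test:

#include <linux/sched.h>
#include <linux/sched/deadline.h>

static bool example_is_deadline_by_policy(struct task_struct *p)
{
	/* dl_task() also matches PI-boosted tasks; the policy check does not. */
	return dl_task(p) && p->policy == SCHED_DEADLINE;
}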
+ * Copyright (c) 2022 Tejun Heo <tj@kernel.org> + * Copyright (c) 2022 David Vernet <dvernet@meta.com> + */ +#ifndef _LINUX_SCHED_EXT_H +#define _LINUX_SCHED_EXT_H + +#ifdef CONFIG_SCHED_CLASS_EXT + +#include <linux/llist.h> +#include <linux/rhashtable-types.h> + +enum scx_public_consts { + SCX_OPS_NAME_LEN = 128, + + SCX_SLICE_DFL = 20 * 1000000, /* 20ms */ + SCX_SLICE_INF = U64_MAX, /* infinite, implies nohz */ +}; + +/* + * DSQ (dispatch queue) IDs are 64bit of the format: + * + * Bits: [63] [62 .. 0] + * [ B] [ ID ] + * + * B: 1 for IDs for built-in DSQs, 0 for ops-created user DSQs + * ID: 63 bit ID + * + * Built-in IDs: + * + * Bits: [63] [62] [61..32] [31 .. 0] + * [ 1] [ L] [ R ] [ V ] + * + * 1: 1 for built-in DSQs. + * L: 1 for LOCAL_ON DSQ IDs, 0 for others + * V: For LOCAL_ON DSQ IDs, a CPU number. For others, a pre-defined value. + */ +enum scx_dsq_id_flags { + SCX_DSQ_FLAG_BUILTIN = 1LLU << 63, + SCX_DSQ_FLAG_LOCAL_ON = 1LLU << 62, + + SCX_DSQ_INVALID = SCX_DSQ_FLAG_BUILTIN | 0, + SCX_DSQ_GLOBAL = SCX_DSQ_FLAG_BUILTIN | 1, + SCX_DSQ_LOCAL = SCX_DSQ_FLAG_BUILTIN | 2, + SCX_DSQ_LOCAL_ON = SCX_DSQ_FLAG_BUILTIN | SCX_DSQ_FLAG_LOCAL_ON, + SCX_DSQ_LOCAL_CPU_MASK = 0xffffffffLLU, +}; + +/* + * A dispatch queue (DSQ) can be either a FIFO or p->scx.dsq_vtime ordered + * queue. A built-in DSQ is always a FIFO. The built-in local DSQs are used to + * buffer between the scheduler core and the BPF scheduler. See the + * documentation for more details. + */ +struct scx_dispatch_q { + raw_spinlock_t lock; + struct list_head list; /* tasks in dispatch order */ + struct rb_root priq; /* used to order by p->scx.dsq_vtime */ + u32 nr; + u32 seq; /* used by BPF iter */ + u64 id; + struct rhash_head hash_node; + struct llist_node free_node; + struct rcu_head rcu; +}; + +/* scx_entity.flags */ +enum scx_ent_flags { + SCX_TASK_QUEUED = 1 << 0, /* on ext runqueue */ + SCX_TASK_RESET_RUNNABLE_AT = 1 << 2, /* runnable_at should be reset */ + SCX_TASK_DEQD_FOR_SLEEP = 1 << 3, /* last dequeue was for SLEEP */ + + SCX_TASK_STATE_SHIFT = 8, /* bit 8 and 9 are used to carry scx_task_state */ + SCX_TASK_STATE_BITS = 2, + SCX_TASK_STATE_MASK = ((1 << SCX_TASK_STATE_BITS) - 1) << SCX_TASK_STATE_SHIFT, + + SCX_TASK_CURSOR = 1 << 31, /* iteration cursor, not a task */ +}; + +/* scx_entity.flags & SCX_TASK_STATE_MASK */ +enum scx_task_state { + SCX_TASK_NONE, /* ops.init_task() not called yet */ + SCX_TASK_INIT, /* ops.init_task() succeeded, but task can be cancelled */ + SCX_TASK_READY, /* fully initialized, but not in sched_ext */ + SCX_TASK_ENABLED, /* fully initialized and in sched_ext */ + + SCX_TASK_NR_STATES, +}; + +/* scx_entity.dsq_flags */ +enum scx_ent_dsq_flags { + SCX_TASK_DSQ_ON_PRIQ = 1 << 0, /* task is queued on the priority queue of a dsq */ +}; + +/* + * Mask bits for scx_entity.kf_mask. Not all kfuncs can be called from + * everywhere and the following bits track which kfunc sets are currently + * allowed for %current. This simple per-task tracking works because SCX ops + * nest in a limited way. BPF will likely implement a way to allow and disallow + * kfuncs depending on the calling context which will replace this manual + * mechanism. See scx_kf_allow(). 
+ */ +enum scx_kf_mask { + SCX_KF_UNLOCKED = 0, /* sleepable and not rq locked */ + /* ENQUEUE and DISPATCH may be nested inside CPU_RELEASE */ + SCX_KF_CPU_RELEASE = 1 << 0, /* ops.cpu_release() */ + /* ops.dequeue (in REST) may be nested inside DISPATCH */ + SCX_KF_DISPATCH = 1 << 1, /* ops.dispatch() */ + SCX_KF_ENQUEUE = 1 << 2, /* ops.enqueue() and ops.select_cpu() */ + SCX_KF_SELECT_CPU = 1 << 3, /* ops.select_cpu() */ + SCX_KF_REST = 1 << 4, /* other rq-locked operations */ + + __SCX_KF_RQ_LOCKED = SCX_KF_CPU_RELEASE | SCX_KF_DISPATCH | + SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST, + __SCX_KF_TERMINAL = SCX_KF_ENQUEUE | SCX_KF_SELECT_CPU | SCX_KF_REST, +}; + +enum scx_dsq_lnode_flags { + SCX_DSQ_LNODE_ITER_CURSOR = 1 << 0, + + /* high 16 bits can be for iter cursor flags */ + __SCX_DSQ_LNODE_PRIV_SHIFT = 16, +}; + +struct scx_dsq_list_node { + struct list_head node; + u32 flags; + u32 priv; /* can be used by iter cursor */ +}; + +/* + * The following is embedded in task_struct and contains all fields necessary + * for a task to be scheduled by SCX. + */ +struct sched_ext_entity { + struct scx_dispatch_q *dsq; + struct scx_dsq_list_node dsq_list; /* dispatch order */ + struct rb_node dsq_priq; /* p->scx.dsq_vtime order */ + u32 dsq_seq; + u32 dsq_flags; /* protected by DSQ lock */ + u32 flags; /* protected by rq lock */ + u32 weight; + s32 sticky_cpu; + s32 holding_cpu; + u32 kf_mask; /* see scx_kf_mask above */ + struct task_struct *kf_tasks[2]; /* see SCX_CALL_OP_TASK() */ + atomic_long_t ops_state; + + struct list_head runnable_node; /* rq->scx.runnable_list */ + unsigned long runnable_at; + +#ifdef CONFIG_SCHED_CORE + u64 core_sched_at; /* see scx_prio_less() */ +#endif + u64 ddsp_dsq_id; + u64 ddsp_enq_flags; + + /* BPF scheduler modifiable fields */ + + /* + * Runtime budget in nsecs. This is usually set through + * scx_bpf_dispatch() but can also be modified directly by the BPF + * scheduler. Automatically decreased by SCX as the task executes. On + * depletion, a scheduling event is triggered. + * + * This value is cleared to zero if the task is preempted by + * %SCX_KICK_PREEMPT and shouldn't be used to determine how long the + * task ran. Use p->se.sum_exec_runtime instead. + */ + u64 slice; + + /* + * Used to order tasks when dispatching to the vtime-ordered priority + * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime() + * but can also be modified directly by the BPF scheduler. Modifying it + * while a task is queued on a dsq may mangle the ordering and is not + * recommended. + */ + u64 dsq_vtime; + + /* + * If set, reject future sched_setscheduler(2) calls updating the policy + * to %SCHED_EXT with -%EACCES. + * + * Can be set from ops.init_task() while the BPF scheduler is being + * loaded (!scx_init_task_args->fork). If set and the task's policy is + * already %SCHED_EXT, the task's policy is rejected and forcefully + * reverted to %SCHED_NORMAL. The number of such events are reported + * through /sys/kernel/debug/sched_ext::nr_rejected. Setting this flag + * during fork is not allowed. 
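On the BPF side (outside this header), a minimal enqueue callback in the style of the in-tree scx_simple example, assuming the scx helper macros and kfuncs shipped under tools/sched_ext; it places every runnable task on the built-in global DSQ with the default slice defined above:

/* #include <scx/common.bpf.h> after vmlinux.h in a real BPF scheduler */
void BPF_STRUCT_OPS(example_enqueue, struct task_struct *p, u64 enq_flags)
{
	scx_bpf_dispatch(p, SCX_DSQ_GLOBAL, SCX_SLICE_DFL, enq_flags);
}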
+ */ + bool disallow; /* reject switching into SCX */ + + /* cold fields */ +#ifdef CONFIG_EXT_GROUP_SCHED + struct cgroup *cgrp_moving_from; +#endif + /* must be the last field, see init_scx_entity() */ + struct list_head tasks_node; +}; + +void sched_ext_free(struct task_struct *p); +void print_scx_info(const char *log_lvl, struct task_struct *p); + +#else /* !CONFIG_SCHED_CLASS_EXT */ + +static inline void sched_ext_free(struct task_struct *p) {} +static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {} + +#endif /* CONFIG_SCHED_CLASS_EXT */ +#endif /* _LINUX_SCHED_EXT_H */ diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 91546493c43d..07bb8d4181d7 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h @@ -179,27 +179,20 @@ static inline void mm_update_next_owner(struct mm_struct *mm) extern void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack); -extern unsigned long -arch_get_unmapped_area(struct file *, unsigned long, unsigned long, - unsigned long, unsigned long); -extern unsigned long + +unsigned long +arch_get_unmapped_area(struct file *filp, unsigned long addr, + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t vm_flags); +unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags); + unsigned long len, unsigned long pgoff, + unsigned long flags, vm_flags_t); unsigned long mm_get_unmapped_area(struct mm_struct *mm, struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags); -unsigned long -arch_get_unmapped_area_vmflags(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags, vm_flags_t vm_flags); -unsigned long -arch_get_unmapped_area_topdown_vmflags(struct file *filp, unsigned long addr, - unsigned long len, unsigned long pgoff, - unsigned long flags, vm_flags_t); - unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, struct file *filp, unsigned long addr, @@ -211,11 +204,11 @@ unsigned long mm_get_unmapped_area_vmflags(struct mm_struct *mm, unsigned long generic_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags); + unsigned long flags, vm_flags_t vm_flags); unsigned long generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags); + unsigned long flags, vm_flags_t vm_flags); #else static inline void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack) {} diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index ab83d85e1183..6ab43b4f72f9 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -14,6 +14,7 @@ */ #define MAX_RT_PRIO 100 +#define MAX_DL_PRIO 0 #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index b2b9e6eb9683..4e3338103654 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -6,19 +6,40 @@ struct task_struct; -static inline int rt_prio(int prio) +static inline bool rt_prio(int prio) { - if (unlikely(prio < MAX_RT_PRIO)) - return 1; - return 0; + return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO); } -static inline int rt_task(struct task_struct *p) +static inline bool rt_or_dl_prio(int prio) +{ + return unlikely(prio < MAX_RT_PRIO); +} + +/* 
+ * Returns true if a task has a priority that belongs to RT class. PI-boosted + * tasks will return true. Use rt_policy() to ignore PI-boosted tasks. + */ +static inline bool rt_task(struct task_struct *p) { return rt_prio(p->prio); } -static inline bool task_is_realtime(struct task_struct *tsk) +/* + * Returns true if a task has a priority that belongs to RT or DL classes. + * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore + * PI-boosted tasks. + */ +static inline bool rt_or_dl_task(struct task_struct *p) +{ + return rt_or_dl_prio(p->prio); +} + +/* + * Returns true if a task has a policy that belongs to RT or DL classes. + * PI-boosted tasks will return false. + */ +static inline bool rt_or_dl_task_policy(struct task_struct *tsk) { int policy = tsk->policy; diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0a0e23c45406..c8ed09ac29ac 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h @@ -137,7 +137,7 @@ struct signal_struct { /* POSIX.1b Interval Timers */ unsigned int next_posix_timer_id; - struct list_head posix_timers; + struct hlist_head posix_timers; /* ITIMER_REAL timer for the process */ struct hrtimer real_timer; @@ -276,8 +276,7 @@ static inline void signal_set_stop_flags(struct signal_struct *sig, extern void flush_signals(struct task_struct *); extern void ignore_signals(struct task_struct *); extern void flush_signal_handlers(struct task_struct *, int force_default); -extern int dequeue_signal(struct task_struct *task, sigset_t *mask, - kernel_siginfo_t *info, enum pid_type *type); +extern int dequeue_signal(sigset_t *mask, kernel_siginfo_t *info, enum pid_type *type); static inline int kernel_dequeue_signal(void) { @@ -287,7 +286,7 @@ static inline int kernel_dequeue_signal(void) int ret; spin_lock_irq(&task->sighand->siglock); - ret = dequeue_signal(task, &task->blocked, &__info, &__type); + ret = dequeue_signal(&task->blocked, &__info, &__type); spin_unlock_irq(&task->sighand->siglock); return ret; diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index d362aacf9f89..0f2aeb37bbb0 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -63,7 +63,8 @@ extern asmlinkage void schedule_tail(struct task_struct *prev); extern void init_idle(struct task_struct *idle, int cpu); extern int sched_fork(unsigned long clone_flags, struct task_struct *p); -extern void sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); +extern int sched_cgroup_fork(struct task_struct *p, struct kernel_clone_args *kargs); +extern void sched_cancel_fork(struct task_struct *p); extern void sched_post_fork(struct task_struct *p); extern void sched_dead(struct task_struct *p); @@ -119,6 +120,11 @@ static inline struct task_struct *get_task_struct(struct task_struct *t) return t; } +static inline struct task_struct *tryget_task_struct(struct task_struct *t) +{ + return refcount_inc_not_zero(&t->usage) ? 
t : NULL; +} + extern void __put_task_struct(struct task_struct *t); extern void __put_task_struct_rcu_cb(struct rcu_head *rhp); diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index ccd72b978e1f..bf10bdb487dd 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h @@ -95,23 +95,11 @@ static inline int object_is_on_stack(const void *obj) extern void thread_stack_cache_init(void); #ifdef CONFIG_DEBUG_STACK_USAGE +unsigned long stack_not_used(struct task_struct *p); +#else static inline unsigned long stack_not_used(struct task_struct *p) { - unsigned long *n = end_of_stack(p); - - do { /* Skip over canary */ -# ifdef CONFIG_STACK_GROWSUP - n--; -# else - n++; -# endif - } while (!*n); - -# ifdef CONFIG_STACK_GROWSUP - return (unsigned long)end_of_stack(p) - (unsigned long)n; -# else - return (unsigned long)n - (unsigned long)end_of_stack(p); -# endif + return 0; } #endif extern void set_task_stack_end_magic(struct task_struct *tsk); diff --git a/include/linux/scmi_imx_protocol.h b/include/linux/scmi_imx_protocol.h new file mode 100644 index 000000000000..066216f1357a --- /dev/null +++ b/include/linux/scmi_imx_protocol.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SCMI Message Protocol driver NXP extension header + * + * Copyright 2024 NXP. + */ + +#ifndef _LINUX_SCMI_NXP_PROTOCOL_H +#define _LINUX_SCMI_NXP_PROTOCOL_H + +#include <linux/bitfield.h> +#include <linux/device.h> +#include <linux/notifier.h> +#include <linux/types.h> + +enum scmi_nxp_protocol { + SCMI_PROTOCOL_IMX_BBM = 0x81, + SCMI_PROTOCOL_IMX_MISC = 0x84, +}; + +struct scmi_imx_bbm_proto_ops { + int (*rtc_time_set)(const struct scmi_protocol_handle *ph, u32 id, + uint64_t sec); + int (*rtc_time_get)(const struct scmi_protocol_handle *ph, u32 id, + u64 *val); + int (*rtc_alarm_set)(const struct scmi_protocol_handle *ph, u32 id, + bool enable, u64 sec); + int (*button_get)(const struct scmi_protocol_handle *ph, u32 *state); +}; + +enum scmi_nxp_notification_events { + SCMI_EVENT_IMX_BBM_RTC = 0x0, + SCMI_EVENT_IMX_BBM_BUTTON = 0x1, + SCMI_EVENT_IMX_MISC_CONTROL = 0x0, +}; + +struct scmi_imx_bbm_notif_report { + bool is_rtc; + bool is_button; + ktime_t timestamp; + unsigned int rtc_id; + unsigned int rtc_evt; +}; + +struct scmi_imx_misc_ctrl_notify_report { + ktime_t timestamp; + unsigned int ctrl_id; + unsigned int flags; +}; + +struct scmi_imx_misc_proto_ops { + int (*misc_ctrl_set)(const struct scmi_protocol_handle *ph, u32 id, + u32 num, u32 *val); + int (*misc_ctrl_get)(const struct scmi_protocol_handle *ph, u32 id, + u32 *num, u32 *val); + int (*misc_ctrl_req_notify)(const struct scmi_protocol_handle *ph, + u32 ctrl_id, u32 evt_id, u32 flags); +}; +#endif diff --git a/include/linux/security.h b/include/linux/security.h index 1390f1efb4f0..b86ec2afc691 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -83,6 +83,18 @@ enum lsm_event { LSM_POLICY_CHANGE, }; +struct dm_verity_digest { + const char *alg; + const u8 *digest; + size_t digest_len; +}; + +enum lsm_integrity_type { + LSM_INT_DMVERITY_SIG_VALID, + LSM_INT_DMVERITY_ROOTHASH, + LSM_INT_FSVERITY_BUILTINSIG_VALID, +}; + /* * These are reasons that can be passed to the security_locked_down() * LSM hook. 
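For orientation, a hypothetical RTC read path built on the i.MX BBM ops above; it assumes the driver has already looked up the protocol ops and handle through the SCMI core:

#include <linux/rtc.h>
#include <linux/scmi_protocol.h>
#include <linux/scmi_imx_protocol.h>

static int example_bbm_read_time(const struct scmi_imx_bbm_proto_ops *ops,
				 const struct scmi_protocol_handle *ph,
				 struct rtc_time *tm)
{
	u64 sec;
	int ret;

	ret = ops->rtc_time_get(ph, 0, &sec);	/* RTC instance 0 */
	if (ret)
		return ret;

	rtc_time64_to_tm(sec, tm);
	return 0;
}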
Lockdown reasons that protect kernel integrity (ie, the @@ -399,6 +411,9 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer void security_inode_getsecid(struct inode *inode, u32 *secid); int security_inode_copy_up(struct dentry *src, struct cred **new); int security_inode_copy_up_xattr(struct dentry *src, const char *name); +int security_inode_setintegrity(const struct inode *inode, + enum lsm_integrity_type type, const void *value, + size_t size); int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn); int security_file_permission(struct file *file, int mask); @@ -509,6 +524,11 @@ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); int security_locked_down(enum lockdown_reason what); int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, u32 *uctx_len, void *val, size_t val_len, u64 id, u64 flags); +int security_bdev_alloc(struct block_device *bdev); +void security_bdev_free(struct block_device *bdev); +int security_bdev_setintegrity(struct block_device *bdev, + enum lsm_integrity_type type, const void *value, + size_t size); #else /* CONFIG_SECURITY */ static inline int call_blocking_lsm_notifier(enum lsm_event event, void *data) @@ -634,7 +654,7 @@ static inline int security_settime64(const struct timespec64 *ts, static inline int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { - return __vm_enough_memory(mm, pages, cap_vm_enough_memory(mm, pages)); + return __vm_enough_memory(mm, pages, !cap_vm_enough_memory(mm, pages)); } static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm) @@ -1010,6 +1030,13 @@ static inline int security_inode_copy_up(struct dentry *src, struct cred **new) return 0; } +static inline int security_inode_setintegrity(const struct inode *inode, + enum lsm_integrity_type type, + const void *value, size_t size) +{ + return 0; +} + static inline int security_kernfs_init_security(struct kernfs_node *kn_dir, struct kernfs_node *kn) { @@ -1483,6 +1510,23 @@ static inline int lsm_fill_user_ctx(struct lsm_ctx __user *uctx, { return -EOPNOTSUPP; } + +static inline int security_bdev_alloc(struct block_device *bdev) +{ + return 0; +} + +static inline void security_bdev_free(struct block_device *bdev) +{ +} + +static inline int security_bdev_setintegrity(struct block_device *bdev, + enum lsm_integrity_type type, + const void *value, size_t size) +{ + return 0; +} + #endif /* CONFIG_SECURITY */ #if defined(CONFIG_SECURITY) && defined(CONFIG_WATCH_QUEUE) @@ -2090,6 +2134,7 @@ struct dentry *securityfs_create_symlink(const char *name, const char *target, const struct inode_operations *iops); extern void securityfs_remove(struct dentry *dentry); +extern void securityfs_recursive_remove(struct dentry *dentry); #else /* CONFIG_SECURITYFS */ @@ -2137,7 +2182,7 @@ extern int security_bpf_prog_load(struct bpf_prog *prog, union bpf_attr *attr, struct bpf_token *token); extern void security_bpf_prog_free(struct bpf_prog *prog); extern int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, - struct path *path); + const struct path *path); extern void security_bpf_token_free(struct bpf_token *token); extern int security_bpf_token_cmd(const struct bpf_token *token, enum bpf_cmd cmd); extern int security_bpf_token_capable(const struct bpf_token *token, int cap); @@ -2177,7 +2222,7 @@ static inline void security_bpf_prog_free(struct bpf_prog *prog) { } static inline int security_bpf_token_create(struct bpf_token *token, union bpf_attr *attr, - struct path 
*path) + const struct path *path) { return 0; } @@ -2256,4 +2301,12 @@ static inline int security_uring_cmd(struct io_uring_cmd *ioucmd) #endif /* CONFIG_SECURITY */ #endif /* CONFIG_IO_URING */ +#ifdef CONFIG_SECURITY +extern void security_initramfs_populated(void); +#else +static inline void security_initramfs_populated(void) +{ +} +#endif /* CONFIG_SECURITY */ + #endif /* ! __LINUX_SECURITY_H */ diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index d90d8ee29d81..fffeb754880f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h @@ -157,7 +157,7 @@ __seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \ static __always_inline unsigned \ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ { \ - unsigned seq = READ_ONCE(s->seqcount.sequence); \ + unsigned seq = smp_load_acquire(&s->seqcount.sequence); \ \ if (!IS_ENABLED(CONFIG_PREEMPT_RT)) \ return seq; \ @@ -170,7 +170,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \ * Re-read the sequence counter since the (possibly \ * preempted) writer made progress. \ */ \ - seq = READ_ONCE(s->seqcount.sequence); \ + seq = smp_load_acquire(&s->seqcount.sequence); \ } \ \ return seq; \ @@ -208,7 +208,7 @@ static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s) static inline unsigned __seqprop_sequence(const seqcount_t *s) { - return READ_ONCE(s->sequence); + return smp_load_acquire(&s->sequence); } static inline bool __seqprop_preemptible(const seqcount_t *s) @@ -263,17 +263,9 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) #define seqprop_assert(s) __seqprop(s, assert)(s) /** - * __read_seqcount_begin() - begin a seqcount_t read section w/o barrier + * __read_seqcount_begin() - begin a seqcount_t read section * @s: Pointer to seqcount_t or any of the seqcount_LOCKNAME_t variants * - * __read_seqcount_begin is like read_seqcount_begin, but has no smp_rmb() - * barrier. Callers should ensure that smp_rmb() or equivalent ordering is - * provided before actually loading any of the variables that are to be - * protected in this critical section. - * - * Use carefully, only in critical code, and comment how the barrier is - * provided. - * * Return: count to be passed to read_seqcount_retry() */ #define __read_seqcount_begin(s) \ @@ -293,13 +285,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) * * Return: count to be passed to read_seqcount_retry() */ -#define raw_read_seqcount_begin(s) \ -({ \ - unsigned _seq = __read_seqcount_begin(s); \ - \ - smp_rmb(); \ - _seq; \ -}) +#define raw_read_seqcount_begin(s) __read_seqcount_begin(s) /** * read_seqcount_begin() - begin a seqcount_t read critical section @@ -328,7 +314,6 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex) ({ \ unsigned __seq = seqprop_sequence(s); \ \ - smp_rmb(); \ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \ __seq; \ }) diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index aea25eef9a1a..4ab65874a850 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h @@ -11,6 +11,8 @@ #include <linux/compiler.h> #include <linux/console.h> #include <linux/interrupt.h> +#include <linux/lockdep.h> +#include <linux/printk.h> #include <linux/spinlock.h> #include <linux/sched.h> #include <linux/tty.h> @@ -590,6 +592,95 @@ struct uart_port { void *private_data; /* generic platform data pointer */ }; +/* + * Only for console->device_lock()/_unlock() callbacks and internal + * port lock wrapper synchronization. 
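The seqlock.h hunk above only changes the begin-side ordering (an acquire load instead of READ_ONCE() plus smp_rmb()); the usual read loop is unaffected, as in this hypothetical sketch (the seqcount is assumed to be initialised elsewhere with seqcount_spinlock_init()):

#include <linux/seqlock.h>

static seqcount_spinlock_t example_seq;
static u64 example_lo, example_hi;

static u64 example_read_both(void)
{
	unsigned int seq;
	u64 lo, hi;

	do {
		seq = read_seqcount_begin(&example_seq);
		lo = example_lo;
		hi = example_hi;
	} while (read_seqcount_retry(&example_seq, seq));

	return lo | (hi << 32);
}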
+ */ +static inline void __uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) +{ + spin_lock_irqsave(&up->lock, *flags); +} + +/* + * Only for console->device_lock()/_unlock() callbacks and internal + * port lock wrapper synchronization. + */ +static inline void __uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) +{ + spin_unlock_irqrestore(&up->lock, flags); +} + +/** + * uart_port_set_cons - Safely set the @cons field for a uart + * @up: The uart port to set + * @con: The new console to set to + * + * This function must be used to set @up->cons. It uses the port lock to + * synchronize with the port lock wrappers in order to ensure that the console + * cannot change or disappear while another context is holding the port lock. + */ +static inline void uart_port_set_cons(struct uart_port *up, struct console *con) +{ + unsigned long flags; + + __uart_port_lock_irqsave(up, &flags); + up->cons = con; + __uart_port_unlock_irqrestore(up, flags); +} + +/* Only for internal port lock wrapper usage. */ +static inline bool __uart_port_using_nbcon(struct uart_port *up) +{ + lockdep_assert_held_once(&up->lock); + + if (likely(!uart_console(up))) + return false; + + /* + * @up->cons is only modified under the port lock. Therefore it is + * certain that it cannot disappear here. + * + * @up->cons->node is added/removed from the console list under the + * port lock. Therefore it is certain that the registration status + * cannot change here, thus @up->cons->flags can be read directly. + */ + if (hlist_unhashed_lockless(&up->cons->node) || + !(up->cons->flags & CON_NBCON) || + !up->cons->write_atomic) { + return false; + } + + return true; +} + +/* Only for internal port lock wrapper usage. */ +static inline bool __uart_port_nbcon_try_acquire(struct uart_port *up) +{ + if (!__uart_port_using_nbcon(up)) + return true; + + return nbcon_device_try_acquire(up->cons); +} + +/* Only for internal port lock wrapper usage. */ +static inline void __uart_port_nbcon_acquire(struct uart_port *up) +{ + if (!__uart_port_using_nbcon(up)) + return; + + while (!nbcon_device_try_acquire(up->cons)) + cpu_relax(); +} + +/* Only for internal port lock wrapper usage. 
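A hedged sketch of a hypothetical UART driver interrupt handler: it keeps using the standard port lock wrappers shown below, which after this change also acquire and release nbcon console ownership around register access:

#include <linux/interrupt.h>
#include <linux/serial_core.h>

static irqreturn_t example_uart_irq(int irq, void *dev_id)
{
	struct uart_port *port = dev_id;
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	/* ... read the line status, drain the FIFO, push chars to the tty ... */
	uart_port_unlock_irqrestore(port, flags);

	return IRQ_HANDLED;
}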
*/ +static inline void __uart_port_nbcon_release(struct uart_port *up) +{ + if (!__uart_port_using_nbcon(up)) + return; + + nbcon_device_release(up->cons); +} + /** * uart_port_lock - Lock the UART port * @up: Pointer to UART port structure @@ -597,6 +688,7 @@ struct uart_port { static inline void uart_port_lock(struct uart_port *up) { spin_lock(&up->lock); + __uart_port_nbcon_acquire(up); } /** @@ -606,6 +698,7 @@ static inline void uart_port_lock(struct uart_port *up) static inline void uart_port_lock_irq(struct uart_port *up) { spin_lock_irq(&up->lock); + __uart_port_nbcon_acquire(up); } /** @@ -616,6 +709,7 @@ static inline void uart_port_lock_irq(struct uart_port *up) static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags) { spin_lock_irqsave(&up->lock, *flags); + __uart_port_nbcon_acquire(up); } /** @@ -626,7 +720,15 @@ static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *f */ static inline bool uart_port_trylock(struct uart_port *up) { - return spin_trylock(&up->lock); + if (!spin_trylock(&up->lock)) + return false; + + if (!__uart_port_nbcon_try_acquire(up)) { + spin_unlock(&up->lock); + return false; + } + + return true; } /** @@ -638,7 +740,15 @@ static inline bool uart_port_trylock(struct uart_port *up) */ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags) { - return spin_trylock_irqsave(&up->lock, *flags); + if (!spin_trylock_irqsave(&up->lock, *flags)) + return false; + + if (!__uart_port_nbcon_try_acquire(up)) { + spin_unlock_irqrestore(&up->lock, *flags); + return false; + } + + return true; } /** @@ -647,6 +757,7 @@ static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long */ static inline void uart_port_unlock(struct uart_port *up) { + __uart_port_nbcon_release(up); spin_unlock(&up->lock); } @@ -656,6 +767,7 @@ static inline void uart_port_unlock(struct uart_port *up) */ static inline void uart_port_unlock_irq(struct uart_port *up) { + __uart_port_nbcon_release(up); spin_unlock_irq(&up->lock); } @@ -666,6 +778,7 @@ static inline void uart_port_unlock_irq(struct uart_port *up) */ static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags) { + __uart_port_nbcon_release(up); spin_unlock_irqrestore(&up->lock, flags); } diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h index 95ac8398ee72..e7aec20fb44f 100644 --- a/include/linux/set_memory.h +++ b/include/linux/set_memory.h @@ -8,10 +8,10 @@ #ifdef CONFIG_ARCH_HAS_SET_MEMORY #include <asm/set_memory.h> #else -static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } -static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } -static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } -static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline int __must_check set_memory_ro(unsigned long addr, int numpages) { return 0; } +static inline int __must_check set_memory_rw(unsigned long addr, int numpages) { return 0; } +static inline int __must_check set_memory_x(unsigned long addr, int numpages) { return 0; } +static inline int __must_check set_memory_nx(unsigned long addr, int numpages) { return 0; } #endif #ifndef set_memory_rox diff --git a/include/linux/sfp.h b/include/linux/sfp.h index b14be59550e3..60c65cea74f6 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h @@ -550,7 +550,7 @@ struct sfp_upstream_ops { void (*link_down)(void *priv); void (*link_up)(void 
*priv); int (*connect_phy)(void *priv, struct phy_device *); - void (*disconnect_phy)(void *priv); + void (*disconnect_phy)(void *priv, struct phy_device *); }; #if IS_ENABLED(CONFIG_SFP) @@ -576,6 +576,7 @@ struct sfp_bus *sfp_bus_find_fwnode(const struct fwnode_handle *fwnode); int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream, const struct sfp_upstream_ops *ops); void sfp_bus_del_upstream(struct sfp_bus *bus); +const char *sfp_get_name(struct sfp_bus *bus); #else static inline int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id, @@ -654,6 +655,11 @@ static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream, static inline void sfp_bus_del_upstream(struct sfp_bus *bus) { } + +static inline const char *sfp_get_name(struct sfp_bus *bus) +{ + return NULL; +} #endif #endif diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 1d06b1e5408a..515a9a6a3c6f 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -111,20 +111,13 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); int shmem_unuse(unsigned int type); #ifdef CONFIG_TRANSPARENT_HUGEPAGE -extern bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags); unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, - bool global_huge); + loff_t write_end, bool shmem_huge_force); #else -static __always_inline bool shmem_is_huge(struct inode *inode, pgoff_t index, bool shmem_huge_force, - struct mm_struct *mm, unsigned long vm_flags) -{ - return false; -} static inline unsigned long shmem_allowable_huge_orders(struct inode *inode, struct vm_area_struct *vma, pgoff_t index, - bool global_huge) + loff_t write_end, bool shmem_huge_force) { return 0; } @@ -150,8 +143,8 @@ enum sgp_type { SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */ }; -int shmem_get_folio(struct inode *inode, pgoff_t index, struct folio **foliop, - enum sgp_type sgp); +int shmem_get_folio(struct inode *inode, pgoff_t index, loff_t write_end, + struct folio **foliop, enum sgp_type sgp); struct folio *shmem_read_folio_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 29c3ea5b6e93..39f1d16f3628 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -827,6 +827,8 @@ enum skb_tstamp_type { * @csum_level: indicates the number of consecutive checksums found in * the packet minus one that have been verified as * CHECKSUM_UNNECESSARY (max 3) + * @unreadable: indicates that at least 1 of the fragments in this skb is + * unreadable. 
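For the sfp.h change above, upstream (MAC/phylink) drivers now receive the PHY being detached in .disconnect_phy(). A partial sketch of an ops table with the updated signature; the foo_* names are hypothetical and the other callbacks are omitted:

#include <linux/phy.h>
#include <linux/sfp.h>

static int foo_sfp_connect_phy(void *priv, struct phy_device *phydev)
{
	/* attach the module PHY to the MAC, e.g. via phylink */
	return 0;
}

static void foo_sfp_disconnect_phy(void *priv, struct phy_device *phydev)
{
	/* the PHY being removed is now passed explicitly */
}

static const struct sfp_upstream_ops foo_sfp_ops = {
	.connect_phy	= foo_sfp_connect_phy,
	.disconnect_phy	= foo_sfp_disconnect_phy,
	/* .attach/.detach and the module_*() callbacks omitted for brevity */
};

The new sfp_get_name() lets an upstream driver query a name for the attached SFP bus; the !CONFIG_SFP stub returns NULL.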
* @dst_pending_confirm: need to confirm neighbour * @decrypted: Decrypted SKB * @slow_gro: state present at GRO time, slower prepare step required @@ -1008,7 +1010,7 @@ struct sk_buff { #if IS_ENABLED(CONFIG_IP_SCTP) __u8 csum_not_inet:1; #endif - + __u8 unreadable:1; #if defined(CONFIG_NET_SCHED) || defined(CONFIG_NET_XGRESS) __u16 tc_index; /* traffic control index */ #endif @@ -1225,7 +1227,7 @@ static inline bool skb_unref(struct sk_buff *skb) { if (unlikely(!skb)) return false; - if (likely(refcount_read(&skb->users) == 1)) + if (!IS_ENABLED(CONFIG_DEBUG_NET) && likely(refcount_read(&skb->users) == 1)) smp_rmb(); else if (likely(!refcount_dec_and_test(&skb->users))) return false; @@ -1433,6 +1435,7 @@ void skb_prepare_seq_read(struct sk_buff *skb, unsigned int from, unsigned int skb_seq_read(unsigned int consumed, const u8 **data, struct skb_seq_state *st); void skb_abort_seq_read(struct skb_seq_state *st); +int skb_copy_seq_read(struct skb_seq_state *st, int offset, void *to, int len); unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, unsigned int to, struct ts_config *config); @@ -1823,6 +1826,12 @@ static inline void skb_zcopy_downgrade_managed(struct sk_buff *skb) __skb_zcopy_downgrade_managed(skb); } +/* Return true if frags in this skb are readable by the host. */ +static inline bool skb_frags_readable(const struct sk_buff *skb) +{ + return !skb->unreadable; +} + static inline void skb_mark_not_on_list(struct sk_buff *skb) { skb->next = NULL; @@ -2539,10 +2548,17 @@ static inline void skb_len_add(struct sk_buff *skb, int delta) static inline void __skb_fill_netmem_desc(struct sk_buff *skb, int i, netmem_ref netmem, int off, int size) { - struct page *page = netmem_to_page(netmem); + struct page *page; __skb_fill_netmem_desc_noacc(skb_shinfo(skb), i, netmem, off, size); + if (netmem_is_net_iov(netmem)) { + skb->unreadable = true; + return; + } + + page = netmem_to_page(netmem); + /* Propagate page pfmemalloc to the skb if we can. The problem is * that not all callers have unique ownership of the page but rely * on page_is_pfmemalloc doing the right thing(tm). @@ -3523,21 +3539,58 @@ static inline void skb_frag_off_copy(skb_frag_t *fragto, fragto->offset = fragfrom->offset; } +/* Return: true if the skb_frag contains a net_iov. */ +static inline bool skb_frag_is_net_iov(const skb_frag_t *frag) +{ + return netmem_is_net_iov(frag->netmem); +} + +/** + * skb_frag_net_iov - retrieve the net_iov referred to by fragment + * @frag: the fragment + * + * Return: the &struct net_iov associated with @frag. Returns NULL if this + * frag has no associated net_iov. + */ +static inline struct net_iov *skb_frag_net_iov(const skb_frag_t *frag) +{ + if (!skb_frag_is_net_iov(frag)) + return NULL; + + return netmem_to_net_iov(frag->netmem); +} + /** * skb_frag_page - retrieve the page referred to by a paged fragment * @frag: the paged fragment * - * Returns the &struct page associated with @frag. + * Return: the &struct page associated with @frag. Returns NULL if this frag + * has no associated page. */ static inline struct page *skb_frag_page(const skb_frag_t *frag) { + if (skb_frag_is_net_iov(frag)) + return NULL; + return netmem_to_page(frag->netmem); } +/** + * skb_frag_netmem - retrieve the netmem referred to by a fragment + * @frag: the fragment + * + * Return: the &netmem_ref associated with @frag. 
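Because a fragment backed by a net_iov has no host mapping, skb_frag_page() and skb_frag_address() now return NULL for it, so code that walks fragment payloads has to check first. A small sketch of such a walk; the function name is hypothetical:

#include <linux/skbuff.h>

/* Sum the sizes of the host-readable fragments, skipping devmem frags. */
static unsigned int foo_readable_frag_bytes(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	unsigned int i, bytes = 0;

	for (i = 0; i < shinfo->nr_frags; i++) {
		const skb_frag_t *frag = &shinfo->frags[i];

		/* net_iov frags have no struct page and no kernel mapping */
		if (skb_frag_is_net_iov(frag))
			continue;

		bytes += skb_frag_size(frag);
	}

	return bytes;
}

skb_frags_readable(skb) is the cheap whole-skb check for paths that simply refuse unreadable skbs, for example before linearizing.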
+ */ +static inline netmem_ref skb_frag_netmem(const skb_frag_t *frag) +{ + return frag->netmem; +} + int skb_pp_cow_data(struct page_pool *pool, struct sk_buff **pskb, unsigned int headroom); int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, struct bpf_prog *prog); + /** * skb_frag_address - gets the address of the data contained in a paged fragment * @frag: the paged fragment buffer @@ -3547,6 +3600,9 @@ int skb_cow_data_for_xdp(struct page_pool *pool, struct sk_buff **pskb, */ static inline void *skb_frag_address(const skb_frag_t *frag) { + if (!skb_frag_page(frag)) + return NULL; + return page_address(skb_frag_page(frag)) + skb_frag_off(frag); } diff --git a/include/linux/skbuff_ref.h b/include/linux/skbuff_ref.h index 16c241a23472..0f3c58007488 100644 --- a/include/linux/skbuff_ref.h +++ b/include/linux/skbuff_ref.h @@ -34,14 +34,13 @@ static inline void skb_frag_ref(struct sk_buff *skb, int f) bool napi_pp_put_page(netmem_ref netmem); -static inline void -skb_page_unref(struct page *page, bool recycle) +static inline void skb_page_unref(netmem_ref netmem, bool recycle) { #ifdef CONFIG_PAGE_POOL - if (recycle && napi_pp_put_page(page_to_netmem(page))) + if (recycle && napi_pp_put_page(netmem)) return; #endif - put_page(page); + put_page(netmem_to_page(netmem)); } /** @@ -54,7 +53,7 @@ skb_page_unref(struct page *page, bool recycle) */ static inline void __skb_frag_unref(skb_frag_t *frag, bool recycle) { - skb_page_unref(skb_frag_page(frag), recycle); + skb_page_unref(skb_frag_netmem(frag), recycle); } /** diff --git a/include/linux/slab.h b/include/linux/slab.h index eb2bf4629157..b35e2db7eb0e 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -213,6 +213,12 @@ enum _slab_flag_bits { #endif /* + * freeptr_t represents a SLUB freelist pointer, which might be encoded + * and not dereferenceable if CONFIG_SLAB_FREELIST_HARDENED is enabled. + */ +typedef struct { unsigned long v; } freeptr_t; + +/* * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests. * * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault. @@ -234,14 +240,173 @@ struct mem_cgroup; */ bool slab_is_available(void); -struct kmem_cache *kmem_cache_create(const char *name, unsigned int size, - unsigned int align, slab_flags_t flags, - void (*ctor)(void *)); -struct kmem_cache *kmem_cache_create_usercopy(const char *name, - unsigned int size, unsigned int align, - slab_flags_t flags, - unsigned int useroffset, unsigned int usersize, - void (*ctor)(void *)); +/** + * struct kmem_cache_args - Less common arguments for kmem_cache_create() + * + * Any uninitialized fields of the structure are interpreted as unused. The + * exception is @freeptr_offset where %0 is a valid value, so + * @use_freeptr_offset must be also set to %true in order to interpret the field + * as used. For @useroffset %0 is also valid, but only with non-%0 + * @usersize. + * + * When %NULL args is passed to kmem_cache_create(), it is equivalent to all + * fields unused. + */ +struct kmem_cache_args { + /** + * @align: The required alignment for the objects. + * + * %0 means no specific alignment is requested. + */ + unsigned int align; + /** + * @useroffset: Usercopy region offset. + * + * %0 is a valid offset, when @usersize is non-%0 + */ + unsigned int useroffset; + /** + * @usersize: Usercopy region size. + * + * %0 means no usercopy region is specified. 
+ */ + unsigned int usersize; + /** + * @freeptr_offset: Custom offset for the free pointer + * in &SLAB_TYPESAFE_BY_RCU caches + * + * By default &SLAB_TYPESAFE_BY_RCU caches place the free pointer + * outside of the object. This might cause the object to grow in size. + * Cache creators that have a reason to avoid this can specify a custom + * free pointer offset in their struct where the free pointer will be + * placed. + * + * Note that placing the free pointer inside the object requires the + * caller to ensure that no fields are invalidated that are required to + * guard against object recycling (See &SLAB_TYPESAFE_BY_RCU for + * details). + * + * Using %0 as a value for @freeptr_offset is valid. If @freeptr_offset + * is specified, %use_freeptr_offset must be set %true. + * + * Note that @ctor currently isn't supported with custom free pointers + * as a @ctor requires an external free pointer. + */ + unsigned int freeptr_offset; + /** + * @use_freeptr_offset: Whether a @freeptr_offset is used. + */ + bool use_freeptr_offset; + /** + * @ctor: A constructor for the objects. + * + * The constructor is invoked for each object in a newly allocated slab + * page. It is the cache user's responsibility to free object in the + * same state as after calling the constructor, or deal appropriately + * with any differences between a freshly constructed and a reallocated + * object. + * + * %NULL means no constructor. + */ + void (*ctor)(void *); +}; + +struct kmem_cache *__kmem_cache_create_args(const char *name, + unsigned int object_size, + struct kmem_cache_args *args, + slab_flags_t flags); +static inline struct kmem_cache * +__kmem_cache_create(const char *name, unsigned int size, unsigned int align, + slab_flags_t flags, void (*ctor)(void *)) +{ + struct kmem_cache_args kmem_args = { + .align = align, + .ctor = ctor, + }; + + return __kmem_cache_create_args(name, size, &kmem_args, flags); +} + +/** + * kmem_cache_create_usercopy - Create a kmem cache with a region suitable + * for copying to userspace. + * @name: A string which is used in /proc/slabinfo to identify this cache. + * @size: The size of objects to be created in this cache. + * @align: The required alignment for the objects. + * @flags: SLAB flags + * @useroffset: Usercopy region offset + * @usersize: Usercopy region size + * @ctor: A constructor for the objects, or %NULL. + * + * This is a legacy wrapper, new code should use either KMEM_CACHE_USERCOPY() + * if whitelisting a single field is sufficient, or kmem_cache_create() with + * the necessary parameters passed via the args parameter (see + * &struct kmem_cache_args) + * + * Return: a pointer to the cache on success, NULL on failure. + */ +static inline struct kmem_cache * +kmem_cache_create_usercopy(const char *name, unsigned int size, + unsigned int align, slab_flags_t flags, + unsigned int useroffset, unsigned int usersize, + void (*ctor)(void *)) +{ + struct kmem_cache_args kmem_args = { + .align = align, + .ctor = ctor, + .useroffset = useroffset, + .usersize = usersize, + }; + + return __kmem_cache_create_args(name, size, &kmem_args, flags); +} + +/* If NULL is passed for @args, use this variant with default arguments. */ +static inline struct kmem_cache * +__kmem_cache_default_args(const char *name, unsigned int size, + struct kmem_cache_args *args, + slab_flags_t flags) +{ + struct kmem_cache_args kmem_default_args = {}; + + /* Make sure we don't get passed garbage. 
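With the structure above, the uncommon parameters move into a designated-initializer struct and kmem_cache_create() resolves to the right implementation via _Generic(). A sketch for a hypothetical object type that keeps the RCU free pointer inside the object; the foo names and the freelist_slot field are assumptions:

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/stddef.h>

struct foo {
	unsigned long	state;
	void		*freelist_slot;	/* space reserved for the free pointer */
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	struct kmem_cache_args args = {
		.align			= __alignof__(struct foo),
		.freeptr_offset		= offsetof(struct foo, freelist_slot),
		.use_freeptr_offset	= true,
	};

	/* New 4-argument form: name, object size, args, flags. */
	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), &args,
				       SLAB_TYPESAFE_BY_RCU);
	return foo_cachep ? 0 : -ENOMEM;
}

The legacy five-argument call, kmem_cache_create(name, size, align, flags, ctor), still compiles and is routed to __kmem_cache_create(), and passing a plain NULL for the args selects __kmem_cache_default_args().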
*/ + if (WARN_ON_ONCE(args)) + return ERR_PTR(-EINVAL); + + return __kmem_cache_create_args(name, size, &kmem_default_args, flags); +} + +/** + * kmem_cache_create - Create a kmem cache. + * @__name: A string which is used in /proc/slabinfo to identify this cache. + * @__object_size: The size of objects to be created in this cache. + * @__args: Optional arguments, see &struct kmem_cache_args. Passing %NULL + * means defaults will be used for all the arguments. + * + * This is currently implemented as a macro using ``_Generic()`` to call + * either the new variant of the function, or a legacy one. + * + * The new variant has 4 parameters: + * ``kmem_cache_create(name, object_size, args, flags)`` + * + * See __kmem_cache_create_args() which implements this. + * + * The legacy variant has 5 parameters: + * ``kmem_cache_create(name, object_size, align, flags, ctor)`` + * + * The align and ctor parameters map to the respective fields of + * &struct kmem_cache_args + * + * Context: Cannot be called within a interrupt, but can be interrupted. + * + * Return: a pointer to the cache on success, NULL on failure. + */ +#define kmem_cache_create(__name, __object_size, __args, ...) \ + _Generic((__args), \ + struct kmem_cache_args *: __kmem_cache_create_args, \ + void *: __kmem_cache_default_args, \ + default: __kmem_cache_create)(__name, __object_size, __args, __VA_ARGS__) + void kmem_cache_destroy(struct kmem_cache *s); int kmem_cache_shrink(struct kmem_cache *s); @@ -253,20 +418,23 @@ int kmem_cache_shrink(struct kmem_cache *s); * f.e. add ____cacheline_aligned_in_smp to the struct declaration * then the objects will be properly aligned in SMP configurations. */ -#define KMEM_CACHE(__struct, __flags) \ - kmem_cache_create(#__struct, sizeof(struct __struct), \ - __alignof__(struct __struct), (__flags), NULL) +#define KMEM_CACHE(__struct, __flags) \ + __kmem_cache_create_args(#__struct, sizeof(struct __struct), \ + &(struct kmem_cache_args) { \ + .align = __alignof__(struct __struct), \ + }, (__flags)) /* * To whitelist a single field for copying to/from usercopy, use this * macro instead for KMEM_CACHE() above. */ -#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ - kmem_cache_create_usercopy(#__struct, \ - sizeof(struct __struct), \ - __alignof__(struct __struct), (__flags), \ - offsetof(struct __struct, __field), \ - sizeof_field(struct __struct, __field), NULL) +#define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ + __kmem_cache_create_args(#__struct, sizeof(struct __struct), \ + &(struct kmem_cache_args) { \ + .align = __alignof__(struct __struct), \ + .useroffset = offsetof(struct __struct, __field), \ + .usersize = sizeof_field(struct __struct, __field), \ + }, (__flags)) /* * Common kmalloc functions provided by all allocators @@ -547,6 +715,35 @@ void *kmem_cache_alloc_lru_noprof(struct kmem_cache *s, struct list_lru *lru, gfp_t gfpflags) __assume_slab_alignment __malloc; #define kmem_cache_alloc_lru(...) alloc_hooks(kmem_cache_alloc_lru_noprof(__VA_ARGS__)) +/** + * kmem_cache_charge - memcg charge an already allocated slab memory + * @objp: address of the slab object to memcg charge + * @gfpflags: describe the allocation context + * + * kmem_cache_charge allows charging a slab object to the current memcg, + * primarily in cases where charging at allocation time might not be possible + * because the target memcg is not known (i.e. 
softirq context) + * + * The objp should be pointer returned by the slab allocator functions like + * kmalloc (with __GFP_ACCOUNT in flags) or kmem_cache_alloc. The memcg charge + * behavior can be controlled through gfpflags parameter, which affects how the + * necessary internal metadata can be allocated. Including __GFP_NOFAIL denotes + * that overcharging is requested instead of failure, but is not applied for the + * internal metadata allocation. + * + * There are several cases where it will return true even if the charging was + * not done: + * More specifically: + * + * 1. For !CONFIG_MEMCG or cgroup_disable=memory systems. + * 2. Already charged slab objects. + * 3. For slab objects from KMALLOC_NORMAL caches - allocated by kmalloc() + * without __GFP_ACCOUNT + * 4. Allocating internal metadata has failed + * + * Return: true if charge was successful otherwise false. + */ +bool kmem_cache_charge(void *objp, gfp_t gfpflags); void kmem_cache_free(struct kmem_cache *s, void *objp); kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags, @@ -733,6 +930,16 @@ static inline __alloc_size(1, 2) void *kmalloc_array_noprof(size_t n, size_t siz * @new_n: new number of elements to alloc * @new_size: new size of a single member of the array * @flags: the type of memory to allocate (see kmalloc) + * + * If __GFP_ZERO logic is requested, callers must ensure that, starting with the + * initial memory allocation, every subsequent call to this API for the same + * memory allocation is flagged with __GFP_ZERO. Otherwise, it is possible that + * __GFP_ZERO is not fully honored by this API. + * + * See krealloc_noprof() for further details. + * + * In any case, the contents of the object pointed to are preserved up to the + * lesser of the new and old sizes. */ static inline __realloc_size(2, 3) void * __must_check krealloc_array_noprof(void *p, size_t new_n, @@ -841,8 +1048,8 @@ kvmalloc_array_node_noprof(size_t n, size_t size, gfp_t flags, int node) #define kvcalloc_node(...) alloc_hooks(kvcalloc_node_noprof(__VA_ARGS__)) #define kvcalloc(...) alloc_hooks(kvcalloc_noprof(__VA_ARGS__)) -extern void *kvrealloc_noprof(const void *p, size_t oldsize, size_t newsize, gfp_t flags) - __realloc_size(3); +void *kvrealloc_noprof(const void *p, size_t size, gfp_t flags) + __realloc_size(2); #define kvrealloc(...) alloc_hooks(kvrealloc_noprof(__VA_ARGS__)) extern void kvfree(const void *addr); diff --git a/include/linux/smp.h b/include/linux/smp.h index fcd61dfe2af3..f1aa0952e8c3 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h @@ -109,7 +109,7 @@ static inline void on_each_cpu_cond(smp_cond_func_t cond_func, * Architecture specific boot CPU setup. Defined as empty weak function in * init/main.c. Architectures can override it. 
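Going back to kmem_cache_charge() above: it decouples the memcg charge from the allocation, which helps when the allocation happens in a context where the owning memcg is not yet known. A sketch with hypothetical names, assuming bar_cachep was created elsewhere:

#include <linux/slab.h>

static struct kmem_cache *bar_cachep;	/* hypothetical cache, created at init time */

/* Allocation path, e.g. in softirq: the owning memcg is not known yet. */
static void *bar_alloc_atomic(void)
{
	return kmem_cache_alloc(bar_cachep, GFP_ATOMIC);
}

/* Later, in process context on behalf of the owning task. */
static int bar_commit(void *obj)
{
	/* Charges @obj to the current memcg; true may also mean "nothing to do". */
	return kmem_cache_charge(obj, GFP_KERNEL) ? 0 : -ENOMEM;
}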
*/ -void smp_prepare_boot_cpu(void); +void __init smp_prepare_boot_cpu(void); #ifdef CONFIG_SMP @@ -294,4 +294,10 @@ int smpcfd_prepare_cpu(unsigned int cpu); int smpcfd_dead_cpu(unsigned int cpu); int smpcfd_dying_cpu(unsigned int cpu); +#ifdef CONFIG_CSD_LOCK_WAIT_DEBUG +bool csd_lock_is_stuck(void); +#else +static inline bool csd_lock_is_stuck(void) { return false; } +#endif + #endif /* __LINUX_SMP_H */ diff --git a/include/linux/socket.h b/include/linux/socket.h index df9cdb8bbfb8..d18cc47e89bd 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -327,6 +327,7 @@ struct ucred { * plain text and require encryption */ +#define MSG_SOCK_DEVMEM 0x2000000 /* Receive devmem skbs as cmsg */ #define MSG_ZEROCOPY 0x4000000 /* Use user data in kernel path */ #define MSG_SPLICE_PAGES 0x8000000 /* Splice the pages from the iterator in sendmsg() */ #define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h index 94fc1b57c57b..5e0dd47a0412 100644 --- a/include/linux/soundwire/sdw.h +++ b/include/linux/soundwire/sdw.h @@ -704,8 +704,6 @@ struct sdw_master_device { container_of(d, struct sdw_master_device, dev) struct sdw_driver { - const char *name; - int (*probe)(struct sdw_slave *sdw, const struct sdw_device_id *id); int (*remove)(struct sdw_slave *sdw); diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h index d537587b4499..37ae69365fe2 100644 --- a/include/linux/soundwire/sdw_intel.h +++ b/include/linux/soundwire/sdw_intel.h @@ -388,6 +388,7 @@ struct sdw_intel; /* struct intel_sdw_hw_ops - SoundWire ops for Intel platforms. * @debugfs_init: initialize all debugfs capabilities * @debugfs_exit: close and cleanup debugfs capabilities + * @get_link_count: fetch link count from hardware registers * @register_dai: read all PDI information and register DAIs * @check_clock_stop: throw error message if clock is not stopped. * @start_bus: normal start @@ -412,6 +413,8 @@ struct sdw_intel_hw_ops { void (*debugfs_init)(struct sdw_intel *sdw); void (*debugfs_exit)(struct sdw_intel *sdw); + int (*get_link_count)(struct sdw_intel *sdw); + int (*register_dai)(struct sdw_intel *sdw); void (*check_clock_stop)(struct sdw_intel *sdw); @@ -447,4 +450,9 @@ extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops; #define SDW_INTEL_DEV_NUM_IDA_MIN 6 +/* + * Max number of links supported in hardware + */ +#define SDW_INTEL_MAX_LINKS 5 + #endif diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index d47d5f14ff99..4b95663163e0 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h @@ -498,7 +498,6 @@ extern struct spi_device *spi_new_ancillary_device(struct spi_device *spi, u8 ch * controller has native support for memory like operations. * @mem_caps: controller capabilities for the handling of memory operations. * @unprepare_message: undo any work done by prepare_message(). - * @slave_abort: abort the ongoing transfer request on an SPI slave controller * @target_abort: abort the ongoing transfer request on an SPI target controller * @cs_gpiods: Array of GPIO descriptors to use as chip select lines; one per CS * number. 
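On the spi.h side, the slave_* spelling is gone entirely: controller drivers fill in .target_abort and client drivers call spi_target_abort(). A sketch with hypothetical foo_* names:

#include <linux/spi/spi.h>

/* Controller driver: cancel the transfer the target controller has queued. */
static int foo_spi_target_abort(struct spi_controller *ctlr)
{
	/* ... tell the hardware to drop the prepared transfer ... */
	return 0;
}

static void foo_spi_init_controller(struct spi_controller *ctlr)
{
	ctlr->target_abort = foo_spi_target_abort;
}

/* Client driver: abort a transfer the remote host never clocked out. */
static void foo_client_cancel(struct spi_device *spi)
{
	if (spi_controller_is_target(spi->controller))
		spi_target_abort(spi);
}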
Any individual value may be NULL for CS lines that @@ -725,10 +724,7 @@ struct spi_controller { struct spi_message *message); int (*unprepare_message)(struct spi_controller *ctlr, struct spi_message *message); - union { - int (*slave_abort)(struct spi_controller *ctlr); - int (*target_abort)(struct spi_controller *ctlr); - }; + int (*target_abort)(struct spi_controller *ctlr); /* * These hooks are for drivers that use a generic implementation @@ -802,11 +798,6 @@ static inline void spi_controller_put(struct spi_controller *ctlr) put_device(&ctlr->dev); } -static inline bool spi_controller_is_slave(struct spi_controller *ctlr) -{ - return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave; -} - static inline bool spi_controller_is_target(struct spi_controller *ctlr) { return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->target; @@ -1296,7 +1287,6 @@ extern int devm_spi_optimize_message(struct device *dev, struct spi_device *spi, extern int spi_setup(struct spi_device *spi); extern int spi_async(struct spi_device *spi, struct spi_message *message); -extern int spi_slave_abort(struct spi_device *spi); extern int spi_target_abort(struct spi_device *spi); static inline size_t diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h index d4cb83195f7a..c92cd43a47f4 100644 --- a/include/linux/spi/spi_bitbang.h +++ b/include/linux/spi/spi_bitbang.h @@ -24,6 +24,7 @@ struct spi_bitbang { #define BITBANG_CS_ACTIVE 1 /* normally nCS, active low */ #define BITBANG_CS_INACTIVE 0 + void (*set_mosi_idle)(struct spi_device *spi); /* txrx_bufs() may handle dma mapping for transfers that don't * already have one (transfer.{tx,rx}_dma is zero), or use PIO */ diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 8f3f72480e78..ed57598394de 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h @@ -129,10 +129,23 @@ struct srcu_struct { #define SRCU_STATE_SCAN1 1 #define SRCU_STATE_SCAN2 2 +/* + * Values for initializing gp sequence fields. Higher values allow wrap arounds to + * occur earlier. + * The second value with state is useful in the case of static initialization of + * srcu_usage where srcu_gp_seq_needed is expected to have some state value in its + * lower bits (or else it will appear to be already initialized within + * the call check_init_srcu_struct()). 
+ */ +#define SRCU_GP_SEQ_INITIAL_VAL ((0UL - 100UL) << RCU_SEQ_CTR_SHIFT) +#define SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE (SRCU_GP_SEQ_INITIAL_VAL - 1) + #define __SRCU_USAGE_INIT(name) \ { \ .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ - .srcu_gp_seq_needed = -1UL, \ + .srcu_gp_seq = SRCU_GP_SEQ_INITIAL_VAL, \ + .srcu_gp_seq_needed = SRCU_GP_SEQ_INITIAL_VAL_WITH_STATE, \ + .srcu_gp_seq_needed_exp = SRCU_GP_SEQ_INITIAL_VAL, \ .work = __DELAYED_WORK_INITIALIZER(name.work, NULL, 0), \ } diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 84e13bd5df28..d79ff252cfdc 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h @@ -100,6 +100,7 @@ struct stmmac_dma_cfg { bool eame; bool multi_msi_en; bool dche; + bool atds; }; #define AXI_BLEN 7 @@ -137,33 +138,6 @@ struct stmmac_txq_cfg { int tbs_en; }; -/* FPE link state */ -enum stmmac_fpe_state { - FPE_STATE_OFF = 0, - FPE_STATE_CAPABLE = 1, - FPE_STATE_ENTERING_ON = 2, - FPE_STATE_ON = 3, -}; - -/* FPE link-partner hand-shaking mPacket type */ -enum stmmac_mpacket_type { - MPACKET_VERIFY = 0, - MPACKET_RESPONSE = 1, -}; - -enum stmmac_fpe_task_state_t { - __FPE_REMOVING, - __FPE_TASK_SCHED, -}; - -struct stmmac_fpe_cfg { - bool enable; /* FPE enable */ - bool hs_enable; /* FPE handshake enable */ - enum stmmac_fpe_state lp_fpe_state; /* Link Partner FPE state */ - enum stmmac_fpe_state lo_fpe_state; /* Local station FPE state */ - u32 fpe_csr; /* MAC_FPE_CTRL_STS reg cache */ -}; - struct stmmac_safety_feature_cfg { u32 tsoee; u32 mrxpee; @@ -231,7 +205,6 @@ struct plat_stmmacenet_data { struct fwnode_handle *port_node; struct device_node *mdio_node; struct stmmac_dma_cfg *dma_cfg; - struct stmmac_fpe_cfg *fpe_cfg; struct stmmac_safety_feature_cfg *safety_feat_cfg; int clk_csr; int has_gmac; diff --git a/include/linux/string.h b/include/linux/string.h index 9edace076ddb..0dd27afcfaf7 100644 --- a/include/linux/string.h +++ b/include/linux/string.h @@ -76,12 +76,16 @@ ssize_t sized_strscpy(char *, const char *, size_t); * known size. */ #define __strscpy0(dst, src, ...) \ - sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst)) -#define __strscpy1(dst, src, size) sized_strscpy(dst, src, size) + sized_strscpy(dst, src, sizeof(dst) + __must_be_array(dst) + \ + __must_be_cstr(dst) + __must_be_cstr(src)) +#define __strscpy1(dst, src, size) \ + sized_strscpy(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src)) #define __strscpy_pad0(dst, src, ...) \ - sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst)) -#define __strscpy_pad1(dst, src, size) sized_strscpy_pad(dst, src, size) + sized_strscpy_pad(dst, src, sizeof(dst) + __must_be_array(dst) + \ + __must_be_cstr(dst) + __must_be_cstr(src)) +#define __strscpy_pad1(dst, src, size) \ + sized_strscpy_pad(dst, src, size + __must_be_cstr(dst) + __must_be_cstr(src)) /** * strscpy - Copy a C-string into a sized buffer @@ -279,6 +283,18 @@ static inline void memcpy_flushcache(void *dst, const void *src, size_t cnt) void *memchr_inv(const void *s, int c, size_t n); char *strreplace(char *str, char old, char new); +/** + * mem_is_zero - Check if an area of memory is all 0's. + * @s: The memory area + * @n: The size of the area + * + * Return: True if the area of memory is all 0's. 
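mem_is_zero() above is just !memchr_inv(s, 0, n), but it reads better at call sites that only care whether a region is all zero bytes. A small sketch, with the header layout purely hypothetical:

#include <linux/string.h>
#include <linux/types.h>

/* Hypothetical on-wire header whose reserved padding must be zero. */
struct foo_hdr {
	u8 version;
	u8 reserved[7];
};

static bool foo_hdr_valid(const struct foo_hdr *hdr)
{
	/* Equivalent to !memchr_inv(hdr->reserved, 0, sizeof(hdr->reserved)). */
	return hdr->version == 1 &&
	       mem_is_zero(hdr->reserved, sizeof(hdr->reserved));
}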
+ */ +static inline bool mem_is_zero(const void *s, size_t n) +{ + return !memchr_inv(s, 0, n); +} + extern void kfree_const(const void *x); extern char *kstrdup(const char *s, gfp_t gfp) __malloc; diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h index d9ebe20229f8..120ca0f28e95 100644 --- a/include/linux/string_choices.h +++ b/include/linux/string_choices.h @@ -2,17 +2,32 @@ #ifndef _LINUX_STRING_CHOICES_H_ #define _LINUX_STRING_CHOICES_H_ +/* + * Here provide a series of helpers in the str_$TRUE_$FALSE format (you can + * also expand some helpers as needed), where $TRUE and $FALSE are their + * corresponding literal strings. These helpers can be used in the printing + * and also in other places where constant strings are required. Using these + * helpers offers the following benefits: + * 1) Reducing the hardcoding of strings, which makes the code more elegant + * through these simple literal-meaning helpers. + * 2) Unifying the output, which prevents the same string from being printed + * in various forms, such as enable/disable, enabled/disabled, en/dis. + * 3) Deduping by the linker, which results in a smaller binary file. + */ + #include <linux/types.h> static inline const char *str_enable_disable(bool v) { return v ? "enable" : "disable"; } +#define str_disable_enable(v) str_enable_disable(!(v)) static inline const char *str_enabled_disabled(bool v) { return v ? "enabled" : "disabled"; } +#define str_disabled_enabled(v) str_enabled_disabled(!(v)) static inline const char *str_hi_lo(bool v) { @@ -36,11 +51,25 @@ static inline const char *str_on_off(bool v) { return v ? "on" : "off"; } +#define str_off_on(v) str_on_off(!(v)) static inline const char *str_yes_no(bool v) { return v ? "yes" : "no"; } +#define str_no_yes(v) str_yes_no(!(v)) + +static inline const char *str_up_down(bool v) +{ + return v ? "up" : "down"; +} +#define str_down_up(v) str_up_down(!(v)) + +static inline const char *str_true_false(bool v) +{ + return v ? 
"true" : "false"; +} +#define str_false_true(v) str_true_false(!(v)) /** * str_plural - Return the simple pluralization based on English counts diff --git a/include/linux/sungem_phy.h b/include/linux/sungem_phy.h index c505f30e8b68..eecc7eb63bfb 100644 --- a/include/linux/sungem_phy.h +++ b/include/linux/sungem_phy.h @@ -40,7 +40,7 @@ enum { /* An instance of a PHY, partially borrowed from mii_if_info */ struct mii_phy { - struct mii_phy_def* def; + const struct mii_phy_def *def; u32 advertising; int mii_id; diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 0c77ba488bba..fec1e8a1570c 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h @@ -151,13 +151,15 @@ struct rpc_task_setup { #define RPC_WAS_SENT(t) ((t)->tk_flags & RPC_TASK_SENT) #define RPC_IS_MOVEABLE(t) ((t)->tk_flags & RPC_TASK_MOVEABLE) -#define RPC_TASK_RUNNING 0 -#define RPC_TASK_QUEUED 1 -#define RPC_TASK_ACTIVE 2 -#define RPC_TASK_NEED_XMIT 3 -#define RPC_TASK_NEED_RECV 4 -#define RPC_TASK_MSG_PIN_WAIT 5 -#define RPC_TASK_SIGNALLED 6 +enum { + RPC_TASK_RUNNING, + RPC_TASK_QUEUED, + RPC_TASK_ACTIVE, + RPC_TASK_NEED_XMIT, + RPC_TASK_NEED_RECV, + RPC_TASK_MSG_PIN_WAIT, + RPC_TASK_SIGNALLED, +}; #define rpc_test_and_set_running(t) \ test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate) diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h index a7d0406b9ef5..e68fecf6eab5 100644 --- a/include/linux/sunrpc/svc.h +++ b/include/linux/sunrpc/svc.h @@ -21,6 +21,7 @@ #include <linux/wait.h> #include <linux/mm.h> #include <linux/pagevec.h> +#include <linux/kthread.h> /* * @@ -33,9 +34,9 @@ * node traffic on multi-node NUMA NFS servers. */ struct svc_pool { - unsigned int sp_id; /* pool id; also node id on NUMA */ + unsigned int sp_id; /* pool id; also node id on NUMA */ struct lwq sp_xprts; /* pending transports */ - atomic_t sp_nrthreads; /* # of threads in pool */ + unsigned int sp_nrthreads; /* # of threads in pool */ struct list_head sp_all_threads; /* all server threads */ struct llist_head sp_idle_threads; /* idle server threads */ @@ -66,9 +67,10 @@ enum { * We currently do not support more than one RPC program per daemon. */ struct svc_serv { - struct svc_program * sv_program; /* RPC program */ + struct svc_program * sv_programs; /* RPC programs */ struct svc_stat * sv_stats; /* RPC statistics */ spinlock_t sv_lock; + unsigned int sv_nprogs; /* Number of sv_programs */ unsigned int sv_nrthreads; /* # of server threads */ unsigned int sv_maxconn; /* max connections allowed or * '0' causing max to be based @@ -232,6 +234,11 @@ struct svc_rqst { struct net *rq_bc_net; /* pointer to backchannel's * net namespace */ + + int rq_err; /* Thread sets this to inidicate + * initialisation success. + */ + unsigned long bc_to_initval; unsigned int bc_to_retries; void ** rq_lease_breaker; /* The v4 client breaking a lease */ @@ -305,6 +312,31 @@ static inline bool svc_thread_should_stop(struct svc_rqst *rqstp) return test_bit(RQ_VICTIM, &rqstp->rq_flags); } +/** + * svc_thread_init_status - report whether thread has initialised successfully + * @rqstp: the thread in question + * @err: errno code + * + * After performing any initialisation that could fail, and before starting + * normal work, each sunrpc svc_thread must call svc_thread_init_status() + * with an appropriate error, or zero. + * + * If zero is passed, the thread is ready and must continue until + * svc_thread_should_stop() returns true. 
If a non-zero error is passed + * the call will not return - the thread will exit. + */ +static inline void svc_thread_init_status(struct svc_rqst *rqstp, int err) +{ + rqstp->rq_err = err; + /* memory barrier ensures assignment to error above is visible before + * waitqueue_active() test below completes. + */ + smp_mb(); + wake_up_var(&rqstp->rq_err); + if (err) + kthread_exit(1); +} + struct svc_deferred_req { u32 prot; /* protocol (UDP or TCP) */ struct svc_xprt *xprt; @@ -329,10 +361,9 @@ struct svc_process_info { }; /* - * List of RPC programs on the same transport endpoint + * RPC program - an array of these can use the same transport endpoint */ struct svc_program { - struct svc_program * pg_next; /* other programs (same xprt) */ u32 pg_prog; /* program number */ unsigned int pg_lovers; /* lowest version */ unsigned int pg_hivers; /* highest version */ @@ -401,19 +432,16 @@ struct svc_procedure { */ int sunrpc_set_pool_mode(const char *val); int sunrpc_get_pool_mode(char *val, size_t size); -int svc_rpcb_setup(struct svc_serv *serv, struct net *net); void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); int svc_bind(struct svc_serv *serv, struct net *net); struct svc_serv *svc_create(struct svc_program *, unsigned int, int (*threadfn)(void *data)); -struct svc_rqst *svc_rqst_alloc(struct svc_serv *serv, - struct svc_pool *pool, int node); bool svc_rqst_replace_page(struct svc_rqst *rqstp, struct page *page); void svc_rqst_release_pages(struct svc_rqst *rqstp); -void svc_rqst_free(struct svc_rqst *); void svc_exit_thread(struct svc_rqst *); struct svc_serv * svc_create_pooled(struct svc_program *prog, + unsigned int nprog, struct svc_stat *stats, unsigned int bufsize, int (*threadfn)(void *data)); @@ -446,11 +474,6 @@ int svc_generic_rpcbind_set(struct net *net, u32 version, int family, unsigned short proto, unsigned short port); -int svc_rpcbind_set_version(struct net *net, - const struct svc_program *progp, - u32 version, int family, - unsigned short proto, - unsigned short port); #define RPC_MAX_ADDRBUFLEN (63U) diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index d33bab33099a..619fc0bd837a 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h @@ -48,6 +48,7 @@ #include <linux/sunrpc/rpc_rdma.h> #include <linux/sunrpc/rpc_rdma_cid.h> #include <linux/sunrpc/svc_rdma_pcl.h> +#include <linux/sunrpc/rdma_rn.h> #include <linux/percpu_counter.h> #include <rdma/ib_verbs.h> @@ -76,6 +77,7 @@ struct svcxprt_rdma { struct svc_xprt sc_xprt; /* SVC transport structure */ struct rdma_cm_id *sc_cm_id; /* RDMA connection id */ struct list_head sc_accept_q; /* Conn. 
waiting accept */ + struct rpcrdma_notification sc_rn; /* removal notification */ int sc_ord; /* RDMA read limit */ int sc_max_send_sges; bool sc_snd_w_inv; /* OK to use Send With Invalidate */ diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h index 61c455f1e1f5..2e111153f7cd 100644 --- a/include/linux/sunrpc/svcauth.h +++ b/include/linux/sunrpc/svcauth.h @@ -14,6 +14,7 @@ #include <linux/sunrpc/msg_prot.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/gss_api.h> +#include <linux/sunrpc/clnt.h> #include <linux/hash.h> #include <linux/stringhash.h> #include <linux/cred.h> @@ -151,13 +152,16 @@ struct auth_ops { struct svc_xprt; -extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp); extern rpc_authflavor_t svc_auth_flavor(struct svc_rqst *rqstp); extern int svc_authorise(struct svc_rqst *rqstp); extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp); extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops); extern void svc_auth_unregister(rpc_authflavor_t flavor); +extern void svcauth_map_clnt_to_svc_cred_local(struct rpc_clnt *clnt, + const struct cred *, + struct svc_cred *); + extern struct auth_domain *unix_domain_find(char *name); extern void auth_domain_put(struct auth_domain *item); extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new); diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h index 7c78ec6356b9..bf45d9e8492a 100644 --- a/include/linux/sunrpc/svcsock.h +++ b/include/linux/sunrpc/svcsock.h @@ -58,8 +58,6 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk) */ void svc_recv(struct svc_rqst *rqstp); void svc_send(struct svc_rqst *rqstp); -void svc_drop(struct svc_rqst *); -void svc_sock_update_bufs(struct svc_serv *serv); int svc_addsock(struct svc_serv *serv, struct net *net, const int fd, char *name_return, const size_t len, const struct cred *cred); diff --git a/include/linux/sunrpc/xdrgen/_builtins.h b/include/linux/sunrpc/xdrgen/_builtins.h new file mode 100644 index 000000000000..66ca3ece951a --- /dev/null +++ b/include/linux/sunrpc/xdrgen/_builtins.h @@ -0,0 +1,243 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Oracle and/or its affiliates. + * + * This header defines XDR data type primitives specified in + * Section 4 of RFC 4506, used by RPC programs implemented + * in the Linux kernel. + */ + +#ifndef _SUNRPC_XDRGEN__BUILTINS_H_ +#define _SUNRPC_XDRGEN__BUILTINS_H_ + +#include <linux/sunrpc/xdr.h> + +static inline bool +xdrgen_decode_void(struct xdr_stream *xdr) +{ + return true; +} + +static inline bool +xdrgen_encode_void(struct xdr_stream *xdr) +{ + return true; +} + +static inline bool +xdrgen_decode_bool(struct xdr_stream *xdr, bool *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *ptr = (*p != xdr_zero); + return true; +} + +static inline bool +xdrgen_encode_bool(struct xdr_stream *xdr, bool val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *p = val ? 
xdr_one : xdr_zero; + return true; +} + +static inline bool +xdrgen_decode_int(struct xdr_stream *xdr, s32 *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *ptr = be32_to_cpup(p); + return true; +} + +static inline bool +xdrgen_encode_int(struct xdr_stream *xdr, s32 val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *p = cpu_to_be32(val); + return true; +} + +static inline bool +xdrgen_decode_unsigned_int(struct xdr_stream *xdr, u32 *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *ptr = be32_to_cpup(p); + return true; +} + +static inline bool +xdrgen_encode_unsigned_int(struct xdr_stream *xdr, u32 val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *p = cpu_to_be32(val); + return true; +} + +static inline bool +xdrgen_decode_long(struct xdr_stream *xdr, s32 *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *ptr = be32_to_cpup(p); + return true; +} + +static inline bool +xdrgen_encode_long(struct xdr_stream *xdr, s32 val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *p = cpu_to_be32(val); + return true; +} + +static inline bool +xdrgen_decode_unsigned_long(struct xdr_stream *xdr, u32 *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *ptr = be32_to_cpup(p); + return true; +} + +static inline bool +xdrgen_encode_unsigned_long(struct xdr_stream *xdr, u32 val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT); + + if (unlikely(!p)) + return false; + *p = cpu_to_be32(val); + return true; +} + +static inline bool +xdrgen_decode_hyper(struct xdr_stream *xdr, s64 *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2); + + if (unlikely(!p)) + return false; + *ptr = get_unaligned_be64(p); + return true; +} + +static inline bool +xdrgen_encode_hyper(struct xdr_stream *xdr, s64 val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2); + + if (unlikely(!p)) + return false; + put_unaligned_be64(val, p); + return true; +} + +static inline bool +xdrgen_decode_unsigned_hyper(struct xdr_stream *xdr, u64 *ptr) +{ + __be32 *p = xdr_inline_decode(xdr, XDR_UNIT * 2); + + if (unlikely(!p)) + return false; + *ptr = get_unaligned_be64(p); + return true; +} + +static inline bool +xdrgen_encode_unsigned_hyper(struct xdr_stream *xdr, u64 val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT * 2); + + if (unlikely(!p)) + return false; + put_unaligned_be64(val, p); + return true; +} + +static inline bool +xdrgen_decode_string(struct xdr_stream *xdr, string *ptr, u32 maxlen) +{ + __be32 *p; + u32 len; + + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return false; + if (unlikely(maxlen && len > maxlen)) + return false; + if (len != 0) { + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return false; + ptr->data = (unsigned char *)p; + } + ptr->len = len; + return true; +} + +static inline bool +xdrgen_encode_string(struct xdr_stream *xdr, string val, u32 maxlen) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len)); + + if (unlikely(!p)) + return false; + xdr_encode_opaque(p, val.data, val.len); + return true; +} + +static inline bool +xdrgen_decode_opaque(struct xdr_stream *xdr, opaque *ptr, u32 maxlen) +{ + __be32 *p; + u32 len; + + if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0)) + return false; + if (unlikely(maxlen && len > maxlen)) + return false; + 
if (len != 0) { + p = xdr_inline_decode(xdr, len); + if (unlikely(!p)) + return false; + ptr->data = (u8 *)p; + } + ptr->len = len; + return true; +} + +static inline bool +xdrgen_encode_opaque(struct xdr_stream *xdr, opaque val) +{ + __be32 *p = xdr_reserve_space(xdr, XDR_UNIT + xdr_align_size(val.len)); + + if (unlikely(!p)) + return false; + xdr_encode_opaque(p, val.data, val.len); + return true; +} + +#endif /* _SUNRPC_XDRGEN__BUILTINS_H_ */ diff --git a/include/linux/sunrpc/xdrgen/_defs.h b/include/linux/sunrpc/xdrgen/_defs.h new file mode 100644 index 000000000000..be9e62371758 --- /dev/null +++ b/include/linux/sunrpc/xdrgen/_defs.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2024 Oracle and/or its affiliates. + * + * This header defines XDR data type primitives specified in + * Section 4 of RFC 4506, used by RPC programs implemented + * in the Linux kernel. + */ + +#ifndef _SUNRPC_XDRGEN__DEFS_H_ +#define _SUNRPC_XDRGEN__DEFS_H_ + +#define TRUE (true) +#define FALSE (false) + +typedef struct { + u32 len; + unsigned char *data; +} string; + +typedef struct { + u32 len; + u8 *data; +} opaque; + +#endif /* _SUNRPC_XDRGEN__DEFS_H_ */ diff --git a/include/linux/swap.h b/include/linux/swap.h index ba7ea95d1c57..ca533b478c21 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -243,22 +243,24 @@ enum { * free clusters are organized into a list. We fetch an entry from the list to * get a free cluster. * - * The data field stores next cluster if the cluster is free or cluster usage - * counter otherwise. The flags field determines if a cluster is free. This is - * protected by swap_info_struct.lock. + * The flags field determines if a cluster is free. This is + * protected by cluster lock. */ struct swap_cluster_info { spinlock_t lock; /* * Protect swap_cluster_info fields - * and swap_info_struct->swap_map - * elements correspond to the swap - * cluster + * other than list, and swap_info_struct->swap_map + * elements corresponding to the swap cluster. */ - unsigned int data:24; - unsigned int flags:8; + u16 count; + u8 flags; + u8 order; + struct list_head list; }; #define CLUSTER_FLAG_FREE 1 /* This cluster is free */ -#define CLUSTER_FLAG_NEXT_NULL 2 /* This cluster has no next cluster */ +#define CLUSTER_FLAG_NONFULL 2 /* This cluster is on nonfull list */ +#define CLUSTER_FLAG_FRAG 4 /* This cluster is on nonfull list */ +#define CLUSTER_FLAG_FULL 8 /* This cluster is on full list */ /* * The first page in the swap file is the swap header, which is always marked @@ -283,11 +285,6 @@ struct percpu_cluster { unsigned int next[SWAP_NR_ORDERS]; /* Likely next allocation offset */ }; -struct swap_cluster_list { - struct swap_cluster_info head; - struct swap_cluster_info tail; -}; - /* * The in-memory structure used to track swap areas. */ @@ -299,8 +296,15 @@ struct swap_info_struct { signed char type; /* strange name for an index */ unsigned int max; /* extent of the swap_map */ unsigned char *swap_map; /* vmalloc'ed array of usage counts */ + unsigned long *zeromap; /* kvmalloc'ed bitmap to track zero pages */ struct swap_cluster_info *cluster_info; /* cluster info. 
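The xdrgen primitives above all follow one convention: each encoder or decoder returns a bool and advances the xdr_stream, so compound types compose by short-circuiting. A sketch of hand-rolled helpers for a hypothetical two-field XDR struct:

#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/xdrgen/_builtins.h>

struct foo_attrs {
	bool	present;
	s64	size;
};

static bool xdrgen_decode_foo_attrs(struct xdr_stream *xdr, struct foo_attrs *p)
{
	return xdrgen_decode_bool(xdr, &p->present) &&
	       xdrgen_decode_hyper(xdr, &p->size);
}

static bool xdrgen_encode_foo_attrs(struct xdr_stream *xdr, const struct foo_attrs *p)
{
	return xdrgen_encode_bool(xdr, p->present) &&
	       xdrgen_encode_hyper(xdr, p->size);
}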
Only for SSD */ - struct swap_cluster_list free_clusters; /* free clusters list */ + struct list_head free_clusters; /* free clusters list */ + struct list_head full_clusters; /* full clusters list */ + struct list_head nonfull_clusters[SWAP_NR_ORDERS]; + /* list of cluster that contains at least one free slot */ + struct list_head frag_clusters[SWAP_NR_ORDERS]; + /* list of cluster that are fragmented or contented */ + unsigned int frag_cluster_nr[SWAP_NR_ORDERS]; unsigned int lowest_bit; /* index of first free in swap_map */ unsigned int highest_bit; /* index of last free in swap_map */ unsigned int pages; /* total of usable pages of swap */ @@ -331,7 +335,7 @@ struct swap_info_struct { * list. */ struct work_struct discard_work; /* discard worker */ - struct swap_cluster_list discard_clusters; /* discard clusters list */ + struct list_head discard_clusters; /* discard clusters list */ struct plist_node avail_lists[]; /* * entries in swap_avail_heads, one * entry per node. @@ -478,9 +482,9 @@ void put_swap_folio(struct folio *folio, swp_entry_t entry); extern swp_entry_t get_swap_page_of_type(int); extern int get_swap_pages(int n, swp_entry_t swp_entries[], int order); extern int add_swap_count_continuation(swp_entry_t, gfp_t); -extern void swap_shmem_alloc(swp_entry_t); +extern void swap_shmem_alloc(swp_entry_t, int); extern int swap_duplicate(swp_entry_t); -extern int swapcache_prepare(swp_entry_t); +extern int swapcache_prepare(swp_entry_t entry, int nr); extern void swap_free_nr(swp_entry_t entry, int nr_pages); extern void swapcache_free_entries(swp_entry_t *entries, int n); extern void free_swap_and_cache_nr(swp_entry_t entry, int nr); @@ -545,7 +549,7 @@ static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask) return 0; } -static inline void swap_shmem_alloc(swp_entry_t swp) +static inline void swap_shmem_alloc(swp_entry_t swp, int nr) { } @@ -554,7 +558,7 @@ static inline int swap_duplicate(swp_entry_t swp) return 0; } -static inline int swapcache_prepare(swp_entry_t swp) +static inline int swapcache_prepare(swp_entry_t swp, int nr) { return 0; } diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 4bcf6754738d..5758104921e6 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h @@ -870,7 +870,7 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, #endif asmlinkage long sys_name_to_handle_at(int dfd, const char __user *name, struct file_handle __user *handle, - int __user *mnt_id, int flag); + void __user *mnt_id, int flag); asmlinkage long sys_open_by_handle_at(int mountdirfd, struct file_handle __user *handle, int flags); diff --git a/include/linux/tee_core.h b/include/linux/tee_core.h index efd16ed52315..a38494d6b5f4 100644 --- a/include/linux/tee_core.h +++ b/include/linux/tee_core.h @@ -155,6 +155,18 @@ int tee_device_register(struct tee_device *teedev); void tee_device_unregister(struct tee_device *teedev); /** + * tee_device_set_dev_groups() - Set device attribute groups + * @teedev: Device to register + * @dev_groups: Attribute groups + * + * Assigns the provided @dev_groups to the @teedev to be registered later + * with tee_device_register(). Calling this function is optional, but if + * it's called it must be called before tee_device_register(). 
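tee_device_set_dev_groups() documented here is optional, but ordering matters: it has to run before tee_device_register(). A sketch of the tail of a hypothetical TEE driver's probe path, with the attribute group contents left out:

#include <linux/sysfs.h>
#include <linux/tee_core.h>

/* Hypothetical sysfs attribute group exposing driver-specific files. */
static const struct attribute_group foo_tee_attr_group;
static const struct attribute_group *foo_tee_groups[] = {
	&foo_tee_attr_group,
	NULL,
};

/* @teedev was obtained from tee_device_alloc() earlier during probe. */
static int foo_tee_finish_probe(struct tee_device *teedev)
{
	/* Optional, but must precede registration. */
	tee_device_set_dev_groups(teedev, foo_tee_groups);

	return tee_device_register(teedev);
}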
+ */ +void tee_device_set_dev_groups(struct tee_device *teedev, + const struct attribute_group **dev_groups); + +/** * tee_session_calc_client_uuid() - Calculates client UUID for session * @uuid: Resulting UUID * @connection_method: Connection method for session (TEE_IOCTL_LOGIN_*) diff --git a/include/linux/thermal.h b/include/linux/thermal.h index b86ddca46b9e..25ea8fe2313e 100644 --- a/include/linux/thermal.h +++ b/include/linux/thermal.h @@ -85,11 +85,17 @@ struct thermal_trip { struct thermal_zone_device; +struct cooling_spec { + unsigned long upper; /* Highest cooling state */ + unsigned long lower; /* Lowest cooling state */ + unsigned int weight; /* Cooling device weight */ +}; + struct thermal_zone_device_ops { - int (*bind) (struct thermal_zone_device *, - struct thermal_cooling_device *); - int (*unbind) (struct thermal_zone_device *, - struct thermal_cooling_device *); + bool (*should_bind) (struct thermal_zone_device *, + const struct thermal_trip *, + struct thermal_cooling_device *, + struct cooling_spec *); int (*get_temp) (struct thermal_zone_device *, int *); int (*set_trips) (struct thermal_zone_device *, int, int); int (*change_mode) (struct thermal_zone_device *, @@ -203,15 +209,12 @@ static inline void devm_thermal_of_zone_unregister(struct device *dev, } #endif -int thermal_zone_get_trip(struct thermal_zone_device *tz, int trip_id, - struct thermal_trip *trip); int for_each_thermal_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data); int thermal_zone_for_each_trip(struct thermal_zone_device *tz, int (*cb)(struct thermal_trip *, void *), void *data); -int thermal_zone_get_num_trips(struct thermal_zone_device *tz); void thermal_zone_set_trip_temp(struct thermal_zone_device *tz, struct thermal_trip *trip, int temp); @@ -240,20 +243,6 @@ const char *thermal_zone_device_type(struct thermal_zone_device *tzd); int thermal_zone_device_id(struct thermal_zone_device *tzd); struct device *thermal_zone_device(struct thermal_zone_device *tzd); -int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz, - const struct thermal_trip *trip, - struct thermal_cooling_device *cdev, - unsigned long upper, unsigned long lower, - unsigned int weight); -int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int, - struct thermal_cooling_device *, - unsigned long, unsigned long, - unsigned int); -int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz, - const struct thermal_trip *trip, - struct thermal_cooling_device *cdev); -int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int, - struct thermal_cooling_device *); void thermal_zone_device_update(struct thermal_zone_device *, enum thermal_notify_event); diff --git a/include/linux/timekeeper_internal.h b/include/linux/timekeeper_internal.h index 84ff2844df2a..902c20ef495a 100644 --- a/include/linux/timekeeper_internal.h +++ b/include/linux/timekeeper_internal.h @@ -73,7 +73,7 @@ struct tk_read_base { * @overflow_seen: Overflow warning flag (DEBUG_TIMEKEEPING) * * Note: For timespec(64) based interfaces wall_to_monotonic is what - * we need to add to xtime (or xtime corrected for sub jiffie times) + * we need to add to xtime (or xtime corrected for sub jiffy times) * to get to monotonic time. 
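For the thermal_zone_device_ops change above: a zone driver no longer binds cooling devices itself, it only answers whether a given cooling device belongs on a given trip and, if so, fills in the cooling_spec. A sketch of the new callback, with the matching policy and the foo_* names purely hypothetical:

#include <linux/string.h>
#include <linux/thermal.h>

static int foo_get_temp(struct thermal_zone_device *tz, int *temp)
{
	*temp = 45000;	/* hypothetical reading, millidegrees Celsius */
	return 0;
}

static bool foo_should_bind(struct thermal_zone_device *tz,
			    const struct thermal_trip *trip,
			    struct thermal_cooling_device *cdev,
			    struct cooling_spec *c)
{
	/* Hypothetical policy: only our cooling device, only on passive trips. */
	if (trip->type != THERMAL_TRIP_PASSIVE)
		return false;
	if (strcmp(cdev->type, "foo-cooling"))
		return false;

	c->upper = THERMAL_NO_LIMIT;
	c->lower = THERMAL_NO_LIMIT;
	c->weight = THERMAL_WEIGHT_DEFAULT;
	return true;
}

static const struct thermal_zone_device_ops foo_tz_ops = {
	.should_bind	= foo_should_bind,
	.get_temp	= foo_get_temp,
};

The core then evaluates this callback for each cooling device and trip, taking over the binding work that the removed bind()/unbind() callbacks used to do via the now-deleted thermal_zone_bind_cooling_device() helpers.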
Monotonic is pegged at zero at system * boot time, so wall_to_monotonic will be negative, however, we will * ALWAYS keep the tv_nsec part positive so we can use the usual diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index d8e4105a2f21..39c7cf82b0c2 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h @@ -33,6 +33,13 @@ }) #endif +#ifdef masked_user_access_begin + #define can_do_masked_user_access() 1 +#else + #define can_do_masked_user_access() 0 + #define masked_user_access_begin(src) NULL +#endif + /* * Architectures should provide two primitives (raw_copy_{to,from}_user()) * and get rid of their private instances of copy_{to,from}_user() and diff --git a/include/linux/ubsan.h b/include/linux/ubsan.h index bff7445498de..d8219cbe09ff 100644 --- a/include/linux/ubsan.h +++ b/include/linux/ubsan.h @@ -4,6 +4,11 @@ #ifdef CONFIG_UBSAN_TRAP const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type); +#else +static inline const char *report_ubsan_failure(struct pt_regs *regs, u32 check_type) +{ + return NULL; +} #endif #endif diff --git a/include/linux/uio.h b/include/linux/uio.h index 7020adedfa08..853f9de5aa05 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h @@ -11,6 +11,7 @@ #include <uapi/linux/uio.h> struct page; +struct folio_queue; typedef unsigned int __bitwise iov_iter_extraction_t; @@ -25,6 +26,7 @@ enum iter_type { ITER_IOVEC, ITER_BVEC, ITER_KVEC, + ITER_FOLIOQ, ITER_XARRAY, ITER_DISCARD, }; @@ -66,6 +68,7 @@ struct iov_iter { const struct iovec *__iov; const struct kvec *kvec; const struct bio_vec *bvec; + const struct folio_queue *folioq; struct xarray *xarray; void __user *ubuf; }; @@ -74,6 +77,7 @@ struct iov_iter { }; union { unsigned long nr_segs; + u8 folioq_slot; loff_t xarray_start; }; }; @@ -126,6 +130,11 @@ static inline bool iov_iter_is_discard(const struct iov_iter *i) return iov_iter_type(i) == ITER_DISCARD; } +static inline bool iov_iter_is_folioq(const struct iov_iter *i) +{ + return iov_iter_type(i) == ITER_FOLIOQ; +} + static inline bool iov_iter_is_xarray(const struct iov_iter *i) { return iov_iter_type(i) == ITER_XARRAY; @@ -180,6 +189,12 @@ static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset, return copy_page_to_iter(&folio->page, offset, bytes, i); } +static inline size_t copy_folio_from_iter(struct folio *folio, size_t offset, + size_t bytes, struct iov_iter *i) +{ + return copy_page_from_iter(&folio->page, offset, bytes, i); +} + static inline size_t copy_folio_from_iter_atomic(struct folio *folio, size_t offset, size_t bytes, struct iov_iter *i) { @@ -273,6 +288,9 @@ void iov_iter_kvec(struct iov_iter *i, unsigned int direction, const struct kvec void iov_iter_bvec(struct iov_iter *i, unsigned int direction, const struct bio_vec *bvec, unsigned long nr_segs, size_t count); void iov_iter_discard(struct iov_iter *i, unsigned int direction, size_t count); +void iov_iter_folio_queue(struct iov_iter *i, unsigned int direction, + const struct folio_queue *folioq, + unsigned int first_slot, unsigned int offset, size_t count); void iov_iter_xarray(struct iov_iter *i, unsigned int direction, struct xarray *xarray, loff_t start, size_t count); ssize_t iov_iter_get_pages2(struct iov_iter *i, struct page **pages, diff --git a/include/linux/union_find.h b/include/linux/union_find.h new file mode 100644 index 000000000000..cfd49263c138 --- /dev/null +++ b/include/linux/union_find.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __LINUX_UNION_FIND_H +#define 
__LINUX_UNION_FIND_H +/** + * union_find.h - union-find data structure implementation + * + * This header provides functions and structures to implement the union-find + * data structure. The union-find data structure is used to manage disjoint + * sets and supports efficient union and find operations. + * + * See Documentation/core-api/union_find.rst for documentation and samples. + */ + +struct uf_node { + struct uf_node *parent; + unsigned int rank; +}; + +/* This macro is used for static initialization of a union-find node. */ +#define UF_INIT_NODE(node) {.parent = &node, .rank = 0} + +/** + * uf_node_init - Initialize a union-find node + * @node: pointer to the union-find node to be initialized + * + * This function sets the parent of the node to itself and + * initializes its rank to 0. + */ +static inline void uf_node_init(struct uf_node *node) +{ + node->parent = node; + node->rank = 0; +} + +/* find the root of a node */ +struct uf_node *uf_find(struct uf_node *node); + +/* Merge two intersecting nodes */ +void uf_union(struct uf_node *node1, struct uf_node *node2); + +#endif /* __LINUX_UNION_FIND_H */ diff --git a/include/linux/unroll.h b/include/linux/unroll.h new file mode 100644 index 000000000000..d42fd6366373 --- /dev/null +++ b/include/linux/unroll.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +/* + * Copyright (C) 2023 Google LLC. + */ + +#ifndef __UNROLL_H +#define __UNROLL_H + +#include <linux/args.h> + +#define UNROLL(N, MACRO, args...) CONCATENATE(__UNROLL_, N)(MACRO, args) + +#define __UNROLL_0(MACRO, args...) +#define __UNROLL_1(MACRO, args...) __UNROLL_0(MACRO, args) MACRO(0, args) +#define __UNROLL_2(MACRO, args...) __UNROLL_1(MACRO, args) MACRO(1, args) +#define __UNROLL_3(MACRO, args...) __UNROLL_2(MACRO, args) MACRO(2, args) +#define __UNROLL_4(MACRO, args...) __UNROLL_3(MACRO, args) MACRO(3, args) +#define __UNROLL_5(MACRO, args...) __UNROLL_4(MACRO, args) MACRO(4, args) +#define __UNROLL_6(MACRO, args...) __UNROLL_5(MACRO, args) MACRO(5, args) +#define __UNROLL_7(MACRO, args...) __UNROLL_6(MACRO, args) MACRO(6, args) +#define __UNROLL_8(MACRO, args...) __UNROLL_7(MACRO, args) MACRO(7, args) +#define __UNROLL_9(MACRO, args...) __UNROLL_8(MACRO, args) MACRO(8, args) +#define __UNROLL_10(MACRO, args...) __UNROLL_9(MACRO, args) MACRO(9, args) +#define __UNROLL_11(MACRO, args...) __UNROLL_10(MACRO, args) MACRO(10, args) +#define __UNROLL_12(MACRO, args...) __UNROLL_11(MACRO, args) MACRO(11, args) +#define __UNROLL_13(MACRO, args...) __UNROLL_12(MACRO, args) MACRO(12, args) +#define __UNROLL_14(MACRO, args...) __UNROLL_13(MACRO, args) MACRO(13, args) +#define __UNROLL_15(MACRO, args...) __UNROLL_14(MACRO, args) MACRO(14, args) +#define __UNROLL_16(MACRO, args...) __UNROLL_15(MACRO, args) MACRO(15, args) +#define __UNROLL_17(MACRO, args...) __UNROLL_16(MACRO, args) MACRO(16, args) +#define __UNROLL_18(MACRO, args...) __UNROLL_17(MACRO, args) MACRO(17, args) +#define __UNROLL_19(MACRO, args...) __UNROLL_18(MACRO, args) MACRO(18, args) +#define __UNROLL_20(MACRO, args...) 
__UNROLL_19(MACRO, args) MACRO(19, args) + +#endif /* __UNROLL_H */ diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index b503fafb7fb3..2b294bf1881f 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h @@ -16,6 +16,7 @@ #include <linux/types.h> #include <linux/wait.h> +struct uprobe; struct vm_area_struct; struct mm_struct; struct inode; @@ -27,22 +28,22 @@ struct page; #define MAX_URETPROBE_DEPTH 64 -enum uprobe_filter_ctx { - UPROBE_FILTER_REGISTER, - UPROBE_FILTER_UNREGISTER, - UPROBE_FILTER_MMAP, -}; - struct uprobe_consumer { + /* + * handler() can return UPROBE_HANDLER_REMOVE to signal the need to + * unregister uprobe for current process. If UPROBE_HANDLER_REMOVE is + * returned, filter() callback has to be implemented as well and it + * should return false to "confirm" the decision to uninstall uprobe + * for the current process. If filter() is omitted or returns true, + * UPROBE_HANDLER_REMOVE is effectively ignored. + */ int (*handler)(struct uprobe_consumer *self, struct pt_regs *regs); int (*ret_handler)(struct uprobe_consumer *self, unsigned long func, struct pt_regs *regs); - bool (*filter)(struct uprobe_consumer *self, - enum uprobe_filter_ctx ctx, - struct mm_struct *mm); + bool (*filter)(struct uprobe_consumer *self, struct mm_struct *mm); - struct uprobe_consumer *next; + struct list_head cons_node; }; #ifdef CONFIG_UPROBES @@ -76,6 +77,8 @@ struct uprobe_task { struct uprobe *active_uprobe; unsigned long xol_vaddr; + struct arch_uprobe *auprobe; + struct return_instance *return_instances; unsigned int depth; }; @@ -110,10 +113,10 @@ extern bool is_trap_insn(uprobe_opcode_t *insn); extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs); extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); -extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); -extern int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc); -extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); -extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); +extern struct uprobe *uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc); +extern int uprobe_apply(struct uprobe *uprobe, struct uprobe_consumer *uc, bool); +extern void uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc); +extern void uprobe_unregister_sync(void); extern int uprobe_mmap(struct vm_area_struct *vma); extern void uprobe_munmap(struct vm_area_struct *vma, unsigned long start, unsigned long end); extern void uprobe_start_dup_mmap(void); @@ -151,22 +154,21 @@ static inline void uprobes_init(void) #define uprobe_get_trap_addr(regs) instruction_pointer(regs) -static inline int -uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc) -{ - return -ENOSYS; -} -static inline int uprobe_register_refctr(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) +static inline struct uprobe * +uprobe_register(struct inode *inode, loff_t offset, loff_t ref_ctr_offset, struct uprobe_consumer *uc) { - return -ENOSYS; + return ERR_PTR(-ENOSYS); } static inline int -uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool add) +uprobe_apply(struct uprobe* uprobe, struct 
uprobe_consumer *uc, bool add)
{
	return -ENOSYS;
}
static inline void
-uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc)
+uprobe_unregister_nosync(struct uprobe *uprobe, struct uprobe_consumer *uc)
+{
+}
+static inline void uprobe_unregister_sync(void)
{
}
static inline int uprobe_mmap(struct vm_area_struct *vma)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 6030a8235617..3625096d5f85 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -21,9 +21,11 @@ struct uid_gid_extent {
};
struct uid_gid_map { /* 64 bytes -- 1 cache line */
-	u32 nr_extents;
	union {
-		struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+		struct {
+			struct uid_gid_extent extent[UID_GID_MAP_MAX_BASE_EXTENTS];
+			u32 nr_extents;
+		};
		struct {
			struct uid_gid_extent *forward;
			struct uid_gid_extent *reverse;
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index a12bcf042551..9fc6ce15c499 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -267,6 +267,25 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
+void userfaultfd_reset_ctx(struct vm_area_struct *vma);
+
+struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
+					     struct vm_area_struct *prev,
+					     struct vm_area_struct *vma,
+					     unsigned long start,
+					     unsigned long end);
+
+int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
+			       struct vm_area_struct *vma,
+			       unsigned long vm_flags,
+			       unsigned long start, unsigned long end,
+			       bool wp_async);
+
+void userfaultfd_release_new(struct userfaultfd_ctx *ctx);
+
+void userfaultfd_release_all(struct mm_struct *mm,
+			     struct userfaultfd_ctx *ctx);
+
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index 7977ca03ac7a..2e7a30fe6b92 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -582,11 +582,20 @@ void vdpa_set_status(struct vdpa_device *vdev, u8 status);
 * @dev: vdpa device to remove
 * Driver need to remove the specified device by calling
 * _vdpa_unregister_device().
+ * @dev_set_attr: change a vdpa device's attributes after it was created
+ * @mdev: parent device to use for device
+ * @dev: vdpa device structure
+ * @config: Attributes to be set for the device.
+ * The driver needs to check the mask of the structure and then set
+ * the related information to the vdpa device. The driver must return 0
+ * if set successfully.
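[Illustration, not part of the patch] With the reworked uprobe API above, uprobe_register() now returns a struct uprobe * (or an ERR_PTR) and also takes the ref_ctr_offset directly, while unregistration is split into a per-probe _nosync() step followed by a single uprobe_unregister_sync() barrier. A hedged sketch of consumer-side usage; the my_* names and the trivial handler are invented:

#include <linux/uprobes.h>
#include <linux/err.h>

static int my_handler(struct uprobe_consumer *self, struct pt_regs *regs)
{
	/* inspect regs here */
	return 0;			/* keep the probe installed */
}

static struct uprobe_consumer my_consumer = {
	.handler = my_handler,
};

static struct uprobe *my_uprobe;

static int my_attach(struct inode *inode, loff_t offset)
{
	/* No reference counter is used, so ref_ctr_offset is 0. */
	my_uprobe = uprobe_register(inode, offset, 0, &my_consumer);
	return PTR_ERR_OR_ZERO(my_uprobe);
}

static void my_detach(void)
{
	uprobe_unregister_nosync(my_uprobe, &my_consumer);
	/* Wait for in-flight handlers before the consumer can be freed. */
	uprobe_unregister_sync();
}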
*/ struct vdpa_mgmtdev_ops { int (*dev_add)(struct vdpa_mgmt_dev *mdev, const char *name, const struct vdpa_dev_set_config *config); void (*dev_del)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev); + int (*dev_set_attr)(struct vdpa_mgmt_dev *mdev, struct vdpa_device *dev, + const struct vdpa_dev_set_config *config); }; /** diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 4b16844c6bc2..306137a15d07 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -118,7 +118,9 @@ struct virtio_admin_cmd { * struct virtio_device - representation of a device using virtio * @index: unique position on the virtio bus * @failed: saved value for VIRTIO_CONFIG_S_FAILED bit (for restore) - * @config_enabled: configuration change reporting enabled + * @config_core_enabled: configuration change reporting enabled by core + * @config_driver_disabled: configuration change reporting disabled by + * a driver * @config_change_pending: configuration change reported while disabled * @config_lock: protects configuration change reporting * @vqs_list_lock: protects @vqs. @@ -135,7 +137,8 @@ struct virtio_admin_cmd { struct virtio_device { int index; bool failed; - bool config_enabled; + bool config_core_enabled; + bool config_driver_disabled; bool config_change_pending; spinlock_t config_lock; spinlock_t vqs_list_lock; @@ -166,6 +169,10 @@ void __virtqueue_break(struct virtqueue *_vq); void __virtqueue_unbreak(struct virtqueue *_vq); void virtio_config_changed(struct virtio_device *dev); + +void virtio_config_driver_disable(struct virtio_device *dev); +void virtio_config_driver_enable(struct virtio_device *dev); + #ifdef CONFIG_PM_SLEEP int virtio_device_freeze(struct virtio_device *dev); int virtio_device_restore(struct virtio_device *dev); diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index 6c395a2600e8..276ca543ef44 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -173,7 +173,8 @@ retry: break; case SKB_GSO_TCPV4: case SKB_GSO_TCPV6: - if (skb->csum_offset != offsetof(struct tcphdr, check)) + if (skb->ip_summed == CHECKSUM_PARTIAL && + skb->csum_offset != offsetof(struct tcphdr, check)) return -EINVAL; break; } diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index c82089dee0c8..0387d64e2c66 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -133,6 +133,7 @@ struct virtio_vsock_sock { u32 tx_cnt; u32 peer_fwd_cnt; u32 peer_buf_alloc; + size_t bytes_unsent; /* Protected by rx_lock */ u32 fwd_cnt; @@ -193,6 +194,11 @@ s64 virtio_transport_stream_has_data(struct vsock_sock *vsk); s64 virtio_transport_stream_has_space(struct vsock_sock *vsk); u32 virtio_transport_seqpacket_has_data(struct vsock_sock *vsk); +ssize_t virtio_transport_unsent_bytes(struct vsock_sock *vsk); + +void virtio_transport_consume_skb_sent(struct sk_buff *skb, + bool consume); + int virtio_transport_do_socket_init(struct vsock_sock *vsk, struct vsock_sock *psk); int diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h index 747943bc8cc2..aed952d04132 100644 --- a/include/linux/vm_event_item.h +++ b/include/linux/vm_event_item.h @@ -50,6 +50,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, PGSTEAL_ANON, PGSTEAL_FILE, #ifdef CONFIG_NUMA + PGSCAN_ZONE_RECLAIM_SUCCESS, PGSCAN_ZONE_RECLAIM_FAILED, #endif PGINODESTEAL, SLABS_SCANNED, KSWAPD_INODESTEAL, @@ -104,6 +105,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, THP_SPLIT_PAGE, THP_SPLIT_PAGE_FAILED, THP_DEFERRED_SPLIT_PAGE, + 
THP_UNDERUSED_SPLIT_PAGE,
 THP_SPLIT_PMD,
 THP_SCAN_EXCEED_NONE_PTE,
 THP_SCAN_EXCEED_SWAP_PTE,
@@ -154,6 +156,30 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
 VMA_LOCK_RETRY,
 VMA_LOCK_MISS,
#endif
+#ifdef CONFIG_DEBUG_STACK_USAGE
+	KSTACK_1K,
+#if THREAD_SIZE > 1024
+	KSTACK_2K,
+#endif
+#if THREAD_SIZE > 2048
+	KSTACK_4K,
+#endif
+#if THREAD_SIZE > 4096
+	KSTACK_8K,
+#endif
+#if THREAD_SIZE > 8192
+	KSTACK_16K,
+#endif
+#if THREAD_SIZE > 16384
+	KSTACK_32K,
+#endif
+#if THREAD_SIZE > 32768
+	KSTACK_64K,
+#endif
+#if THREAD_SIZE > 65536
+	KSTACK_REST,
+#endif
+#endif /* CONFIG_DEBUG_STACK_USAGE */
 NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index e4a631ec430b..ad2ce7a6ab7a 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -189,6 +189,10 @@ extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1
extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...) alloc_hooks(vcalloc_noprof(__VA_ARGS__))
+void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
+		__realloc_size(2);
+#define vrealloc(...) alloc_hooks(vrealloc_noprof(__VA_ARGS__))
+
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index 9eb77c9007e6..d2761bf8ff32 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -32,6 +32,7 @@ struct reclaim_stat {
 unsigned nr_ref_keep;
 unsigned nr_unmap_fail;
 unsigned nr_lazyfree_fail;
+	unsigned nr_demoted;
};
/* Stat data for system wide items */
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4eb8f9563136..59c2695e12e7 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -507,6 +507,47 @@ void workqueue_softirq_dead(unsigned int cpu);
__printf(1, 4) struct workqueue_struct *
alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+#ifdef CONFIG_LOCKDEP
+/**
+ * alloc_workqueue_lockdep_map - allocate a workqueue with user-defined lockdep_map
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags
+ * @max_active: max in-flight work items, 0 for default
+ * @lockdep_map: user-defined lockdep_map
+ * @...: args for @fmt
+ *
+ * Same as alloc_workqueue but with a user-defined lockdep_map. Useful for
+ * workqueues created with the same purpose and to avoid leaking a lockdep_map
+ * on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+__printf(1, 5) struct workqueue_struct *
+alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
+		struct lockdep_map *lockdep_map, ...);
+
+/**
+ * alloc_ordered_workqueue_lockdep_map - allocate an ordered workqueue with
+ * user-defined lockdep_map
+ *
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @lockdep_map: user-defined lockdep_map
+ * @args: args for @fmt
+ *
+ * Same as alloc_ordered_workqueue but with a user-defined lockdep_map.
+ * Useful for workqueues created with the same purpose and to avoid leaking a
+ * lockdep_map on each workqueue creation.
+ *
+ * RETURNS:
+ * Pointer to the allocated workqueue on success, %NULL on failure.
+ */
+#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...)
\ + alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \ + 1, lockdep_map, ##args) +#endif + /** * alloc_ordered_workqueue - allocate an ordered workqueue * @fmt: printf format for the name of the workqueue diff --git a/include/linux/writeback.h b/include/linux/writeback.h index 1a54676d843a..d6db822e4bb3 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -79,6 +79,9 @@ struct writeback_control { */ struct swap_iocb **swap_plug; + /* Target list for splitting a large folio */ + struct list_head *list; + /* internal fields used by the ->writepages implementation: */ struct folio_batch fbatch; pgoff_t index; @@ -200,7 +203,8 @@ void inode_io_list_del(struct inode *inode); /* writeback.h requires fs.h; it, too, is not included from here. */ static inline void wait_on_inode(struct inode *inode) { - wait_on_bit(&inode->i_state, __I_NEW, TASK_UNINTERRUPTIBLE); + wait_var_event(inode_state_wait_address(inode, __I_NEW), + !(READ_ONCE(inode->i_state) & I_NEW)); } #ifdef CONFIG_CGROUP_WRITEBACK @@ -217,7 +221,7 @@ void wbc_account_cgroup_owner(struct writeback_control *wbc, struct page *page, size_t bytes); int cgroup_writeback_by_id(u64 bdi_id, int memcg_id, enum wb_reason reason, struct wb_completion *done); -void cgroup_writeback_umount(void); +void cgroup_writeback_umount(struct super_block *sb); bool cleanup_offline_cgwb(struct bdi_writeback *wb); /** @@ -324,7 +328,7 @@ static inline void wbc_account_cgroup_owner(struct writeback_control *wbc, { } -static inline void cgroup_writeback_umount(void) +static inline void cgroup_writeback_umount(struct super_block *sb) { } diff --git a/include/linux/xz.h b/include/linux/xz.h index 7285ca5d56e9..58ae1d746c6f 100644 --- a/include/linux/xz.h +++ b/include/linux/xz.h @@ -1,11 +1,10 @@ +/* SPDX-License-Identifier: 0BSD */ + /* * XZ decompressor * * Authors: Lasse Collin <lasse.collin@tukaani.org> * Igor Pavlov <https://7-zip.org/> - * - * This file has been put into the public domain. - * You can do whatever you want with this file. */ #ifndef XZ_H @@ -19,11 +18,6 @@ # include <stdint.h> #endif -/* In Linux, this is used to make extern functions static when needed. */ -#ifndef XZ_EXTERN -# define XZ_EXTERN extern -#endif - /** * enum xz_mode - Operation mode * @@ -143,7 +137,7 @@ struct xz_buf { size_t out_size; }; -/** +/* * struct xz_dec - Opaque type to hold the XZ decoder state */ struct xz_dec; @@ -191,7 +185,7 @@ struct xz_dec; * ready to be used with xz_dec_run(). If memory allocation fails, * xz_dec_init() returns NULL. */ -XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); +struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); /** * xz_dec_run() - Run the XZ decoder @@ -211,7 +205,7 @@ XZ_EXTERN struct xz_dec *xz_dec_init(enum xz_mode mode, uint32_t dict_max); * get that amount valid data from the beginning of the stream. You must use * the multi-call decoder if you don't want to uncompress the whole stream. */ -XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); +enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); /** * xz_dec_reset() - Reset an already allocated decoder state @@ -224,32 +218,38 @@ XZ_EXTERN enum xz_ret xz_dec_run(struct xz_dec *s, struct xz_buf *b); * xz_dec_run(). Thus, explicit call to xz_dec_reset() is useful only in * multi-call mode. 
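[Illustration, not part of the patch] A hedged sketch of how the alloc_ordered_workqueue_lockdep_map() helper introduced a little earlier might be used to share one lockdep class across repeated workqueue creations. It assumes CONFIG_LOCKDEP (the helpers are only declared in that case); the my_* names are invented:

#include <linux/workqueue.h>
#include <linux/lockdep.h>
#include <linux/errno.h>

/* One lockdep class shared by every queue this driver creates, so repeated
 * create/destroy cycles do not each leak a fresh lockdep_map. */
static struct lock_class_key my_wq_key;
static struct lockdep_map my_wq_lockdep_map =
	STATIC_LOCKDEP_MAP_INIT("my_driver_wq", &my_wq_key);

static struct workqueue_struct *my_wq;

static int my_create_queue(void)
{
	my_wq = alloc_ordered_workqueue_lockdep_map("my_driver_wq",
						    WQ_MEM_RECLAIM,
						    &my_wq_lockdep_map);
	return my_wq ? 0 : -ENOMEM;
}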
*/ -XZ_EXTERN void xz_dec_reset(struct xz_dec *s); +void xz_dec_reset(struct xz_dec *s); /** * xz_dec_end() - Free the memory allocated for the decoder state * @s: Decoder state allocated using xz_dec_init(). If s is NULL, * this function does nothing. */ -XZ_EXTERN void xz_dec_end(struct xz_dec *s); +void xz_dec_end(struct xz_dec *s); -/* - * Decompressor for MicroLZMA, an LZMA variant with a very minimal header. - * See xz_dec_microlzma_alloc() below for details. +/** + * DOC: MicroLZMA decompressor + * + * This MicroLZMA header format was created for use in EROFS but may be used + * by others too. **In most cases one needs the XZ APIs above instead.** * - * These functions aren't used or available in preboot code and thus aren't - * marked with XZ_EXTERN. This avoids warnings about static functions that - * are never defined. + * The compressed format supported by this decoder is a raw LZMA stream + * whose first byte (always 0x00) has been replaced with bitwise-negation + * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is + * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00. + * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream + * marker must not be used. The unused values are reserved for future use. */ -/** + +/* * struct xz_dec_microlzma - Opaque type to hold the MicroLZMA decoder state */ struct xz_dec_microlzma; /** * xz_dec_microlzma_alloc() - Allocate memory for the MicroLZMA decoder - * @mode XZ_SINGLE or XZ_PREALLOC - * @dict_size LZMA dictionary size. This must be at least 4 KiB and + * @mode: XZ_SINGLE or XZ_PREALLOC + * @dict_size: LZMA dictionary size. This must be at least 4 KiB and * at most 3 GiB. * * In contrast to xz_dec_init(), this function only allocates the memory @@ -262,40 +262,30 @@ struct xz_dec_microlzma; * On success, xz_dec_microlzma_alloc() returns a pointer to * struct xz_dec_microlzma. If memory allocation fails or * dict_size is invalid, NULL is returned. - * - * The compressed format supported by this decoder is a raw LZMA stream - * whose first byte (always 0x00) has been replaced with bitwise-negation - * of the LZMA properties (lc/lp/pb) byte. For example, if lc/lp/pb is - * 3/0/2, the first byte is 0xA2. This way the first byte can never be 0x00. - * Just like with LZMA2, lc + lp <= 4 must be true. The LZMA end-of-stream - * marker must not be used. The unused values are reserved for future use. - * This MicroLZMA header format was created for use in EROFS but may be used - * by others too. */ -extern struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode, - uint32_t dict_size); +struct xz_dec_microlzma *xz_dec_microlzma_alloc(enum xz_mode mode, + uint32_t dict_size); /** * xz_dec_microlzma_reset() - Reset the MicroLZMA decoder state - * @s Decoder state allocated using xz_dec_microlzma_alloc() - * @comp_size Compressed size of the input stream - * @uncomp_size Uncompressed size of the input stream. A value smaller + * @s: Decoder state allocated using xz_dec_microlzma_alloc() + * @comp_size: Compressed size of the input stream + * @uncomp_size: Uncompressed size of the input stream. A value smaller * than the real uncompressed size of the input stream can * be specified if uncomp_size_is_exact is set to false. * uncomp_size can never be set to a value larger than the * expected real uncompressed size because it would eventually * result in XZ_DATA_ERROR. 
- * @uncomp_size_is_exact This is an int instead of bool to avoid
+ * @uncomp_size_is_exact: This is an int instead of bool to avoid
 * requiring stdbool.h. This should normally be set to true.
 * When this is set to false, error detection is weaker.
 */
-extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
-				   uint32_t comp_size, uint32_t uncomp_size,
-				   int uncomp_size_is_exact);
+void xz_dec_microlzma_reset(struct xz_dec_microlzma *s, uint32_t comp_size,
+			    uint32_t uncomp_size, int uncomp_size_is_exact);
/**
 * xz_dec_microlzma_run() - Run the MicroLZMA decoder
- * @s Decoder state initialized using xz_dec_microlzma_reset()
+ * @s: Decoder state initialized using xz_dec_microlzma_reset()
 * @b: Input and output buffers
 *
 * This works similarly to xz_dec_run() with a few important differences.
@@ -329,15 +319,14 @@ extern void xz_dec_microlzma_reset(struct xz_dec_microlzma *s,
 * may be changed normally like with XZ_PREALLOC. This way input data can be
 * provided from non-contiguous memory.
 */
-extern enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s,
-					struct xz_buf *b);
+enum xz_ret xz_dec_microlzma_run(struct xz_dec_microlzma *s, struct xz_buf *b);
/**
 * xz_dec_microlzma_end() - Free the memory allocated for the decoder state
 * @s: Decoder state allocated using xz_dec_microlzma_alloc().
 * If s is NULL, this function does nothing.
 */
-extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
+void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
/*
 * Standalone build (userspace build or in-kernel build for boot time use)
@@ -358,13 +347,13 @@ extern void xz_dec_microlzma_end(struct xz_dec_microlzma *s);
 * This must be called before any other xz_* function to initialize
 * the CRC32 lookup table.
 */
-XZ_EXTERN void xz_crc32_init(void);
+void xz_crc32_init(void);
/*
 * Update CRC32 value using the polynomial from IEEE-802.3. To start a new
 * calculation, the third argument must be zero. To continue the calculation,
 * the previously returned value is passed as the third argument.
 */
-XZ_EXTERN uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
+uint32_t xz_crc32(const uint8_t *buf, size_t size, uint32_t crc);
#endif
#endif
diff --git a/include/linux/zstd.h b/include/linux/zstd.h
index 113408eef6ec..b2c7cf310c8f 100644
--- a/include/linux/zstd.h
+++ b/include/linux/zstd.h
@@ -77,6 +77,30 @@ int zstd_min_clevel(void);
 */
int zstd_max_clevel(void);
+/**
+ * zstd_default_clevel() - default compression level
+ *
+ * Return: Default compression level.
+ */
+int zstd_default_clevel(void);
+
+/**
+ * struct zstd_custom_mem - custom memory allocation
+ */
+typedef ZSTD_customMem zstd_custom_mem;
+
+/**
+ * struct zstd_dict_load_method - Dictionary load method.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictLoadMethod_e zstd_dict_load_method;
+
+/**
+ * struct zstd_dict_content_type - Dictionary content type.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_dictContentType_e zstd_dict_content_type;
+
/* ====== Parameter Selection ====== */
/**
@@ -136,6 +160,19 @@ typedef ZSTD_parameters zstd_parameters;
zstd_parameters zstd_get_params(int level,
	unsigned long long estimated_src_size);
+
+/**
+ * zstd_get_cparams() - returns zstd_compression_parameters for selected level
+ * @level: The compression level
+ * @estimated_src_size: The estimated source size to compress or 0
+ * if unknown.
+ * @dict_size: Dictionary size.
+ *
+ * Return: The selected zstd_compression_parameters.
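[Illustration, not part of the patch] A rough single-call (XZ_SINGLE) MicroLZMA decode sequence following the kernel-doc above. The 64 KiB dictionary size, the my_* name and the error mapping are assumptions for the sketch, not values taken from the header:

#include <linux/xz.h>
#include <linux/errno.h>

/* One-shot (XZ_SINGLE) decode of a MicroLZMA stream with known sizes. */
static int my_microlzma_decompress(const u8 *in, size_t in_size,
				   u8 *out, size_t out_size)
{
	struct xz_dec_microlzma *s;
	struct xz_buf b = {
		.in = in,   .in_pos = 0,  .in_size = in_size,
		.out = out, .out_pos = 0, .out_size = out_size,
	};
	enum xz_ret ret;

	/* 64 KiB is an assumed dictionary size; it must cover the encoder's. */
	s = xz_dec_microlzma_alloc(XZ_SINGLE, 64 * 1024);
	if (!s)
		return -ENOMEM;

	/* Both sizes are exact here, so ask for the stricter checking. */
	xz_dec_microlzma_reset(s, in_size, out_size, true);
	ret = xz_dec_microlzma_run(s, &b);
	xz_dec_microlzma_end(s);

	return ret == XZ_STREAM_END ? 0 : -EINVAL;
}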
+ */
+zstd_compression_parameters zstd_get_cparams(int level,
+	unsigned long long estimated_src_size, size_t dict_size);
+
/* ====== Single-pass Compression ====== */
typedef ZSTD_CCtx zstd_cctx;
@@ -180,6 +217,71 @@ zstd_cctx *zstd_init_cctx(void *workspace, size_t workspace_size);
size_t zstd_compress_cctx(zstd_cctx *cctx, void *dst, size_t dst_capacity,
	const void *src, size_t src_size, const zstd_parameters *parameters);
+/**
+ * zstd_create_cctx_advanced() - Create compression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to compression context otherwise.
+ */
+zstd_cctx *zstd_create_cctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cctx() - Free compression context
+ * @cctx: Pointer to compression context.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cctx(zstd_cctx* cctx);
+
+/**
+ * struct zstd_cdict - Compression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_CDict zstd_cdict;
+
+/**
+ * zstd_create_cdict_byreference() - Create compression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @cparams: Compression parameters.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must not be
+ * freed before the zstd_cdict is destroyed.
+ *
+ * Return: NULL on error, pointer to compression dictionary
+ * otherwise.
+ */
+zstd_cdict *zstd_create_cdict_byreference(const void *dict, size_t dict_size,
+					  zstd_compression_parameters cparams,
+					  zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_cdict() - Free compression dictionary
+ * @cdict: Pointer to compression dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_cdict(zstd_cdict* cdict);
+
+/**
+ * zstd_compress_using_cdict() - compress src into dst using a dictionary
+ * @cctx: The context. Must have been initialized with zstd_init_cctx().
+ * @dst: The buffer to compress src into.
+ * @dst_capacity: The size of the destination buffer. May be any size, but
+ * ZSTD_compressBound(srcSize) is guaranteed to be large enough.
+ * @src: The data to compress.
+ * @src_size: The size of the data to compress.
+ * @cdict: The dictionary to be used.
+ *
+ * Return: The compressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_compress_using_cdict(zstd_cctx *cctx, void *dst,
+	size_t dst_capacity, const void *src, size_t src_size,
+	const zstd_cdict *cdict);
+
/* ====== Single-pass Decompression ====== */
typedef ZSTD_DCtx zstd_dctx;
@@ -220,6 +322,71 @@ zstd_dctx *zstd_init_dctx(void *workspace, size_t workspace_size);
size_t zstd_decompress_dctx(zstd_dctx *dctx, void *dst, size_t dst_capacity,
	const void *src, size_t src_size);
+/**
+ * struct zstd_ddict - Decompression dictionary.
+ * See zstd_lib.h.
+ */
+typedef ZSTD_DDict zstd_ddict;
+
+/**
+ * zstd_create_ddict_byreference() - Create decompression dictionary
+ * @dict: Pointer to dictionary buffer.
+ * @dict_size: Size of the dictionary buffer.
+ * @custom_mem: Memory allocator.
+ *
+ * Note, this uses @dict by reference (ZSTD_dlm_byRef), so @dict must not be
+ * freed before the zstd_ddict is destroyed.
+ *
+ * Return: NULL on error, pointer to decompression dictionary
+ * otherwise.
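[Illustration, not part of the patch] Tying the new advanced-API declarations together: a hedged sketch that builds a by-reference compression dictionary and compresses with it. The vmalloc-backed allocator callbacks and all my_* names are placeholders, not part of the header:

#include <linux/zstd.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

/* Placeholder allocator callbacks; signatures follow ZSTD_customMem. */
static void *my_zstd_alloc(void *opaque, size_t size)
{
	return vmalloc(size);
}

static void my_zstd_free(void *opaque, void *address)
{
	vfree(address);
}

static ssize_t my_compress_with_dict(void *dst, size_t dst_capacity,
				     const void *src, size_t src_size,
				     const void *dict, size_t dict_size)
{
	zstd_custom_mem mem = {
		.customAlloc = my_zstd_alloc,
		.customFree = my_zstd_free,
	};
	zstd_compression_parameters cparams;
	zstd_cctx *cctx;
	zstd_cdict *cdict;
	size_t csize;
	ssize_t ret;

	cparams = zstd_get_cparams(zstd_default_clevel(), src_size, dict_size);

	cctx = zstd_create_cctx_advanced(mem);
	/* @dict is used by reference: it must stay valid while cdict lives. */
	cdict = zstd_create_cdict_byreference(dict, dict_size, cparams, mem);
	if (!cctx || !cdict) {
		ret = -ENOMEM;
		goto out;
	}

	csize = zstd_compress_using_cdict(cctx, dst, dst_capacity,
					  src, src_size, cdict);
	ret = zstd_is_error(csize) ? -EIO : csize;
out:
	zstd_free_cdict(cdict);	/* the zstd free helpers tolerate NULL */
	zstd_free_cctx(cctx);
	return ret;
}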
+ */
+zstd_ddict *zstd_create_ddict_byreference(const void *dict, size_t dict_size,
+					  zstd_custom_mem custom_mem);
+/**
+ * zstd_free_ddict() - Free decompression dictionary
+ * @ddict: Pointer to the dictionary.
+ *
+ * Return: Always 0.
+ */
+size_t zstd_free_ddict(zstd_ddict *ddict);
+
+/**
+ * zstd_create_dctx_advanced() - Create decompression context
+ * @custom_mem: Custom allocator.
+ *
+ * Return: NULL on error, pointer to decompression context otherwise.
+ */
+zstd_dctx *zstd_create_dctx_advanced(zstd_custom_mem custom_mem);
+
+/**
+ * zstd_free_dctx() - Free decompression context
+ * @dctx: Pointer to decompression context.
+ * Return: Always 0.
+ */
+size_t zstd_free_dctx(zstd_dctx *dctx);
+
+/**
+ * zstd_decompress_using_ddict() - decompress src into dst using a dictionary
+ * @dctx: The decompression context.
+ * @dst: The buffer to decompress src into.
+ * @dst_capacity: The size of the destination buffer. Must be at least as large
+ * as the decompressed size. If the caller cannot upper bound the
+ * decompressed size, then it's better to use the streaming API.
+ * @src: The zstd compressed data to decompress. Multiple concatenated
+ * frames and skippable frames are allowed.
+ * @src_size: The exact size of the data to decompress.
+ * @ddict: The dictionary to be used.
+ *
+ * Return: The decompressed size or an error, which can be checked using
+ * zstd_is_error().
+ */
+size_t zstd_decompress_using_ddict(zstd_dctx *dctx,
+	void *dst, size_t dst_capacity, const void *src, size_t src_size,
+	const zstd_ddict *ddict);
+
+
/* ====== Streaming Buffers ====== */
/**
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
index 6cecb4a4f68b..9cd1beef0654 100644
--- a/include/linux/zswap.h
+++ b/include/linux/zswap.h
@@ -13,17 +13,15 @@ extern atomic_t zswap_stored_pages;
struct zswap_lruvec_state {
 /*
- * Number of pages in zswap that should be protected from the shrinker.
- * This number is an estimate of the following counts:
+ * Number of swapped-in pages from disk, i.e. not found in the zswap pool.
 *
- * a) Recent page faults.
- * b) Recent insertion to the zswap LRU. This includes new zswap stores,
- * as well as recent zswap LRU rotations.
- *
- * These pages are likely to be warm, and might incur IO if the are written
- * to swap.
+ * This is consumed and subtracted from the lru size in
+ * zswap_shrinker_count() to penalize past overshrinking that led to disk
+ * swapins. The idea is that had we considered this many more pages in the
+ * LRU active/protected and not written them back, we would not have had to
+ * swap them in.
 */
-	atomic_long_t nr_zswap_protected;
+	atomic_long_t nr_disk_swapins;
};
unsigned long zswap_total_pages(void);
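[Illustration, not part of the patch] And the decompression-side counterpart for the ddict helpers documented above, with the same caveats (invented my_* names, simplified error handling, caller-supplied zstd_custom_mem as in the compression sketch):

#include <linux/zstd.h>
#include <linux/errno.h>

/* @mem is a zstd_custom_mem set up as in the compression sketch above. */
static ssize_t my_decompress_with_dict(zstd_custom_mem mem,
				       void *dst, size_t dst_capacity,
				       const void *src, size_t src_size,
				       const void *dict, size_t dict_size)
{
	zstd_dctx *dctx = zstd_create_dctx_advanced(mem);
	zstd_ddict *ddict = zstd_create_ddict_byreference(dict, dict_size, mem);
	size_t dsize;
	ssize_t ret;

	if (!dctx || !ddict) {
		ret = -ENOMEM;
		goto out;
	}

	dsize = zstd_decompress_using_ddict(dctx, dst, dst_capacity,
					    src, src_size, ddict);
	ret = zstd_is_error(dsize) ? -EIO : dsize;
out:
	zstd_free_ddict(ddict);
	zstd_free_dctx(dctx);
	return ret;
}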