Diffstat (limited to 'include')
236 files changed, 4034 insertions, 2026 deletions
diff --git a/include/acpi/pcc.h b/include/acpi/pcc.h index 840bfc95bae3..9af3b502f839 100644 --- a/include/acpi/pcc.h +++ b/include/acpi/pcc.h @@ -17,6 +17,35 @@ struct pcc_mbox_chan { u32 latency; u32 max_access_rate; u16 min_turnaround_time; + + /* Set to true to indicate that the mailbox should manage + * writing the data to the shared buffer. This differs from + * the case where the drivers are writing to the buffer and + * using send_data only to ring the doorbell. If this flag + * is set, then the void * data parameter of send_data must + * point to a kernel-memory buffer formatted in accordance with + * the PCC specification. + * + * The active buffer management will include reading the + * notify_on_completion flag, and will then + * call mbox_chan_txdone when the acknowledgment interrupt is + * received. + */ + bool manage_writes; + + /* Optional callback that allows the driver + * to allocate the memory used for receiving + * messages. The return value is the location + * inside the buffer where the mailbox should write the data. + */ + void *(*rx_alloc)(struct mbox_client *cl, int size); +}; + +struct pcc_header { + u32 signature; + u32 flags; + u32 length; + u32 command; }; /* Generic Communications Channel Shared Memory Region */ diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild index 8675b7b4ad23..295c94a3ccc1 100644 --- a/include/asm-generic/Kbuild +++ b/include/asm-generic/Kbuild @@ -59,6 +59,7 @@ mandatory-y += tlbflush.h mandatory-y += topology.h mandatory-y += trace_clock.h mandatory-y += uaccess.h +mandatory-y += unwind_user.h mandatory-y += vermagic.h mandatory-y += vga.h mandatory-y += video.h diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h index 372c320c5043..a14f4bdafdda 100644 --- a/include/asm-generic/codetag.lds.h +++ b/include/asm-generic/codetag.lds.h @@ -2,6 +2,12 @@ #ifndef __ASM_GENERIC_CODETAG_LDS_H #define __ASM_GENERIC_CODETAG_LDS_H +#ifdef CONFIG_MEM_ALLOC_PROFILING +#define IF_MEM_ALLOC_PROFILING(...) __VA_ARGS__ +#else +#define IF_MEM_ALLOC_PROFILING(...) +#endif + #define SECTION_WITH_BOUNDARIES(_name) \ . = ALIGN(8); \ __start_##_name = .; \ @@ -9,13 +15,7 @@ __stop_##_name = .; #define CODETAG_SECTIONS() \ - SECTION_WITH_BOUNDARIES(alloc_tags) - -/* - * Module codetags which aren't used after module unload, therefore have the - * same lifespan as the module and can be safely unloaded with the module. - */ -#define MOD_CODETAG_SECTIONS() + IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags)) #define MOD_SEPARATE_CODETAG_SECTION(_name) \ .codetag.##_name : { \ @@ -28,6 +28,6 @@ * unload them individually once unused.
*/ #define MOD_SEPARATE_CODETAG_SECTIONS() \ - MOD_SEPARATE_CODETAG_SECTION(alloc_tags) + IF_MEM_ALLOC_PROFILING(MOD_SEPARATE_CODETAG_SECTION(alloc_tags)) #endif /* __ASM_GENERIC_CODETAG_LDS_H */ diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h index 3e0a8fe9b108..dcb8727f2b82 100644 --- a/include/asm-generic/hugetlb.h +++ b/include/asm-generic/hugetlb.h @@ -66,15 +66,6 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, } #endif -#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - free_pgd_range(tlb, addr, end, floor, ceiling); -} -#endif - #ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte, unsigned long sz) @@ -114,14 +105,6 @@ static inline int huge_pte_none_mostly(pte_t pte) } #endif -#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - return 0; -} -#endif - #ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) diff --git a/include/asm-generic/unwind_user.h b/include/asm-generic/unwind_user.h new file mode 100644 index 000000000000..b8882b909944 --- /dev/null +++ b/include/asm-generic/unwind_user.h @@ -0,0 +1,5 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_GENERIC_UNWIND_USER_H +#define _ASM_GENERIC_UNWIND_USER_H + +#endif /* _ASM_GENERIC_UNWIND_USER_H */ diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h index 188eface0a11..fc4574940636 100644 --- a/include/crypto/algapi.h +++ b/include/crypto/algapi.h @@ -43,8 +43,8 @@ * alias. */ #define MODULE_ALIAS_CRYPTO(name) \ - __MODULE_INFO(alias, alias_userspace, name); \ - __MODULE_INFO(alias, alias_crypto, "crypto-" name) + MODULE_INFO(alias, name); \ + MODULE_INFO(alias, "crypto-" name) struct crypto_aead; struct crypto_instance; diff --git a/include/crypto/engine.h b/include/crypto/engine.h index 545dbefe3e13..2e60344437da 100644 --- a/include/crypto/engine.h +++ b/include/crypto/engine.h @@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine); struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt); struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev, bool retry_support, - int (*cbk_do_batch)(struct crypto_engine *engine), bool rt, int qlen); void crypto_engine_exit(struct crypto_engine *engine); diff --git a/include/crypto/hash.h b/include/crypto/hash.h index db294d452e8c..bbaeae705ef0 100644 --- a/include/crypto/hash.h +++ b/include/crypto/hash.h @@ -184,7 +184,7 @@ struct shash_desc { * Worst case is hmac(sha3-224-s390). Its context is a nested 'shash_desc' * containing a 'struct s390_sha_ctx'. */ -#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 360) +#define HASH_MAX_DESCSIZE (sizeof(struct shash_desc) + 361) #define MAX_SYNC_HASH_REQSIZE (sizeof(struct ahash_request) + \ HASH_MAX_DESCSIZE) diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h index ffffd88bbbad..2d97440028ff 100644 --- a/include/crypto/internal/acompress.h +++ b/include/crypto/internal/acompress.h @@ -63,10 +63,7 @@ struct crypto_acomp_stream { struct crypto_acomp_streams { /* These must come first because of struct scomp_alg. 
*/ void *(*alloc_ctx)(void); - union { - void (*free_ctx)(void *); - void (*cfree_ctx)(const void *); - }; + void (*free_ctx)(void *); struct crypto_acomp_stream __percpu *streams; struct work_struct stream_work; diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h index b6a4ea2240fc..f19ef376833f 100644 --- a/include/crypto/internal/engine.h +++ b/include/crypto/internal/engine.h @@ -21,7 +21,6 @@ struct device; /* * struct crypto_engine - crypto hardware engine * @name: the engine name - * @idling: the engine is entering idle state * @busy: request pump is busy * @running: the engine is on working * @retry_support: indication that the hardware allows re-execution @@ -31,14 +30,6 @@ struct device; * @list: link with the global crypto engine list * @queue_lock: spinlock to synchronise access to request queue * @queue: the crypto queue of the engine - * @prepare_crypt_hardware: a request will soon arrive from the queue - * so the subsystem requests the driver to prepare the hardware - * by issuing this call - * @unprepare_crypt_hardware: there are currently no more requests on the - * queue so the subsystem notifies the driver that it may relax the - * hardware by issuing this call - * @do_batch_requests: execute a batch of requests. Depends on multiple - * requests support. * @kworker: kthread worker struct for request pump * @pump_requests: work struct for scheduling work to the request pump * @priv_data: the engine private data @@ -46,7 +37,6 @@ struct device; */ struct crypto_engine { char name[ENGINE_NAME_LEN]; - bool idling; bool busy; bool running; @@ -58,11 +48,6 @@ struct crypto_engine { struct crypto_queue queue; struct device *dev; - int (*prepare_crypt_hardware)(struct crypto_engine *engine); - int (*unprepare_crypt_hardware)(struct crypto_engine *engine); - int (*do_batch_requests)(struct crypto_engine *engine); - - struct kthread_worker *kworker; struct kthread_work pump_requests; diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h index 0f85c543f80b..6ec5f2f37ccb 100644 --- a/include/crypto/internal/hash.h +++ b/include/crypto/internal/hash.h @@ -30,6 +30,20 @@ __##name##_req, (req)) struct ahash_request; +struct scatterlist; + +struct crypto_hash_walk { + const char *data; + + unsigned int offset; + unsigned int flags; + + struct page *pg; + unsigned int entrylen; + + unsigned int total; + struct scatterlist *sg; +}; struct ahash_instance { void (*free)(struct ahash_instance *inst); @@ -61,6 +75,15 @@ struct crypto_shash_spawn { struct crypto_spawn base; }; +int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err); +int crypto_hash_walk_first(struct ahash_request *req, + struct crypto_hash_walk *walk); + +static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk) +{ + return !(walk->entrylen | walk->total); +} + int crypto_register_ahash(struct ahash_alg *alg); void crypto_unregister_ahash(struct ahash_alg *alg); int crypto_register_ahashes(struct ahash_alg *algs, int count); @@ -91,6 +114,12 @@ static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg) !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY); } +static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm) +{ + return crypto_hash_alg_common(tfm)->base.cra_flags & + CRYPTO_AHASH_ALG_NO_EXPORT_CORE; +} + int crypto_grab_ahash(struct crypto_ahash_spawn *spawn, struct crypto_instance *inst, const char *name, u32 type, u32 mask); @@ -167,6 +196,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm, 
tfm->reqsize = reqsize; } +static inline bool crypto_ahash_tested(struct crypto_ahash *tfm) +{ + struct crypto_tfm *tfm_base = crypto_ahash_tfm(tfm); + + return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED; +} + static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash, unsigned int reqsize) { diff --git a/include/cxl/event.h b/include/cxl/event.h index f9ae1796da85..6fd90f9cc203 100644 --- a/include/cxl/event.h +++ b/include/cxl/event.h @@ -19,7 +19,9 @@ struct cxl_event_record_hdr { __le64 timestamp; u8 maint_op_class; u8 maint_op_sub_class; - u8 reserved[14]; + __le16 ld_id; + u8 head_id; + u8 reserved[11]; } __packed; struct cxl_event_media_hdr { @@ -108,11 +110,43 @@ struct cxl_event_mem_module { u8 reserved[0x2a]; } __packed; +/* + * Memory Sparing Event Record - MSER + * CXL rev 3.2 section 8.2.10.2.1.4; Table 8-60 + */ +struct cxl_event_mem_sparing { + struct cxl_event_record_hdr hdr; + /* + * The fields maintenance operation class and maintenance operation + * subclass defined in the Memory Sparing Event Record are the + * duplication of the same in the common event record. Thus defined + * as reserved and to be removed after the spec correction. + */ + u8 rsv1; + u8 rsv2; + u8 flags; + u8 result; + __le16 validity_flags; + u8 reserved1[6]; + __le16 res_avail; + u8 channel; + u8 rank; + u8 nibble_mask[3]; + u8 bank_group; + u8 bank; + u8 row[3]; + __le16 column; + u8 component_id[CXL_EVENT_GEN_MED_COMP_ID_SIZE]; + u8 sub_channel; + u8 reserved2[0x25]; +} __packed; + union cxl_event { struct cxl_event_generic generic; struct cxl_event_gen_media gen_media; struct cxl_event_dram dram; struct cxl_event_mem_module mem_module; + struct cxl_event_mem_sparing mem_sparing; /* dram & gen_media event header */ struct cxl_event_media_hdr media_hdr; } __packed; @@ -131,6 +165,7 @@ enum cxl_event_type { CXL_CPER_EVENT_GEN_MEDIA, CXL_CPER_EVENT_DRAM, CXL_CPER_EVENT_MEM_MODULE, + CXL_CPER_EVENT_MEM_SPARING, }; #define CPER_CXL_DEVICE_ID_VALID BIT(0) diff --git a/include/dt-bindings/clock/nxp,imx94-clock.h b/include/dt-bindings/clock/nxp,imx94-clock.h new file mode 100644 index 000000000000..c4ba13352b99 --- /dev/null +++ b/include/dt-bindings/clock/nxp,imx94-clock.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright 2025 NXP + */ + +#ifndef __DT_BINDINGS_CLOCK_IMX94_H +#define __DT_BINDINGS_CLOCK_IMX94_H + +#define IMX94_CLK_DISPMIX_CLK_SEL 0 + +#define IMX94_CLK_DISPMIX_LVDS_CLK_GATE 0 + +#endif /* __DT_BINDINGS_CLOCK_IMX94_H */ diff --git a/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h new file mode 100644 index 000000000000..586d1c9b33b3 --- /dev/null +++ b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H +#define _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H + +/* CMN PLL core clock. */ +#define IPQ5018_CMN_PLL_CLK 0 + +/* The output clocks from CMN PLL of IPQ5018. 
*/ +#define IPQ5018_XO_24MHZ_CLK 1 +#define IPQ5018_SLEEP_32KHZ_CLK 2 +#define IPQ5018_ETH_50MHZ_CLK 3 +#endif diff --git a/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h new file mode 100644 index 000000000000..f643c2668c04 --- /dev/null +++ b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H +#define _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H + +/* CMN PLL core clock. */ +#define IPQ5424_CMN_PLL_CLK 0 + +/* The output clocks from CMN PLL of IPQ5424. */ +#define IPQ5424_XO_24MHZ_CLK 1 +#define IPQ5424_SLEEP_32KHZ_CLK 2 +#define IPQ5424_PCS_31P25MHZ_CLK 3 +#define IPQ5424_NSS_300MHZ_CLK 4 +#define IPQ5424_PPE_375MHZ_CLK 5 +#define IPQ5424_ETH0_50MHZ_CLK 6 +#define IPQ5424_ETH1_50MHZ_CLK 7 +#define IPQ5424_ETH2_50MHZ_CLK 8 +#define IPQ5424_ETH_25MHZ_CLK 9 +#endif diff --git a/include/dt-bindings/clock/qcom,milos-camcc.h b/include/dt-bindings/clock/qcom,milos-camcc.h new file mode 100644 index 000000000000..21925dca9a20 --- /dev/null +++ b/include/dt-bindings/clock/qcom,milos-camcc.h @@ -0,0 +1,131 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H + +/* CAM_CC clocks */ +#define CAM_CC_PLL0 0 +#define CAM_CC_PLL0_OUT_EVEN 1 +#define CAM_CC_PLL0_OUT_ODD 2 +#define CAM_CC_PLL1 3 +#define CAM_CC_PLL1_OUT_EVEN 4 +#define CAM_CC_PLL2 5 +#define CAM_CC_PLL2_OUT_EVEN 6 +#define CAM_CC_PLL3 7 +#define CAM_CC_PLL3_OUT_EVEN 8 +#define CAM_CC_PLL4 9 +#define CAM_CC_PLL4_OUT_EVEN 10 +#define CAM_CC_PLL5 11 +#define CAM_CC_PLL5_OUT_EVEN 12 +#define CAM_CC_PLL6 13 +#define CAM_CC_PLL6_OUT_EVEN 14 +#define CAM_CC_BPS_AHB_CLK 15 +#define CAM_CC_BPS_AREG_CLK 16 +#define CAM_CC_BPS_CLK 17 +#define CAM_CC_BPS_CLK_SRC 18 +#define CAM_CC_CAMNOC_ATB_CLK 19 +#define CAM_CC_CAMNOC_AXI_CLK_SRC 20 +#define CAM_CC_CAMNOC_AXI_HF_CLK 21 +#define CAM_CC_CAMNOC_AXI_SF_CLK 22 +#define CAM_CC_CAMNOC_NRT_AXI_CLK 23 +#define CAM_CC_CAMNOC_RT_AXI_CLK 24 +#define CAM_CC_CCI_0_CLK 25 +#define CAM_CC_CCI_0_CLK_SRC 26 +#define CAM_CC_CCI_1_CLK 27 +#define CAM_CC_CCI_1_CLK_SRC 28 +#define CAM_CC_CORE_AHB_CLK 29 +#define CAM_CC_CPAS_AHB_CLK 30 +#define CAM_CC_CPHY_RX_CLK_SRC 31 +#define CAM_CC_CRE_AHB_CLK 32 +#define CAM_CC_CRE_CLK 33 +#define CAM_CC_CRE_CLK_SRC 34 +#define CAM_CC_CSI0PHYTIMER_CLK 35 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 36 +#define CAM_CC_CSI1PHYTIMER_CLK 37 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 38 +#define CAM_CC_CSI2PHYTIMER_CLK 39 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 40 +#define CAM_CC_CSI3PHYTIMER_CLK 41 +#define CAM_CC_CSI3PHYTIMER_CLK_SRC 42 +#define CAM_CC_CSIPHY0_CLK 43 +#define CAM_CC_CSIPHY1_CLK 44 +#define CAM_CC_CSIPHY2_CLK 45 +#define CAM_CC_CSIPHY3_CLK 46 +#define CAM_CC_FAST_AHB_CLK_SRC 47 +#define CAM_CC_GDSC_CLK 48 +#define CAM_CC_ICP_ATB_CLK 49 +#define CAM_CC_ICP_CLK 50 +#define CAM_CC_ICP_CLK_SRC 51 +#define CAM_CC_ICP_CTI_CLK 52 +#define CAM_CC_ICP_TS_CLK 53 +#define CAM_CC_MCLK0_CLK 54 +#define CAM_CC_MCLK0_CLK_SRC 55 +#define CAM_CC_MCLK1_CLK 56 +#define CAM_CC_MCLK1_CLK_SRC 57 +#define CAM_CC_MCLK2_CLK 58 +#define CAM_CC_MCLK2_CLK_SRC 59 +#define CAM_CC_MCLK3_CLK 60 +#define 
CAM_CC_MCLK3_CLK_SRC 61 +#define CAM_CC_MCLK4_CLK 62 +#define CAM_CC_MCLK4_CLK_SRC 63 +#define CAM_CC_OPE_0_AHB_CLK 64 +#define CAM_CC_OPE_0_AREG_CLK 65 +#define CAM_CC_OPE_0_CLK 66 +#define CAM_CC_OPE_0_CLK_SRC 67 +#define CAM_CC_SLEEP_CLK 68 +#define CAM_CC_SLEEP_CLK_SRC 69 +#define CAM_CC_SLOW_AHB_CLK_SRC 70 +#define CAM_CC_SOC_AHB_CLK 71 +#define CAM_CC_SYS_TMR_CLK 72 +#define CAM_CC_TFE_0_AHB_CLK 73 +#define CAM_CC_TFE_0_CLK 74 +#define CAM_CC_TFE_0_CLK_SRC 75 +#define CAM_CC_TFE_0_CPHY_RX_CLK 76 +#define CAM_CC_TFE_0_CSID_CLK 77 +#define CAM_CC_TFE_0_CSID_CLK_SRC 78 +#define CAM_CC_TFE_1_AHB_CLK 79 +#define CAM_CC_TFE_1_CLK 80 +#define CAM_CC_TFE_1_CLK_SRC 81 +#define CAM_CC_TFE_1_CPHY_RX_CLK 82 +#define CAM_CC_TFE_1_CSID_CLK 83 +#define CAM_CC_TFE_1_CSID_CLK_SRC 84 +#define CAM_CC_TFE_2_AHB_CLK 85 +#define CAM_CC_TFE_2_CLK 86 +#define CAM_CC_TFE_2_CLK_SRC 87 +#define CAM_CC_TFE_2_CPHY_RX_CLK 88 +#define CAM_CC_TFE_2_CSID_CLK 89 +#define CAM_CC_TFE_2_CSID_CLK_SRC 90 +#define CAM_CC_TOP_SHIFT_CLK 91 +#define CAM_CC_XO_CLK_SRC 92 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_CAMNOC_BCR 1 +#define CAM_CC_CAMSS_TOP_BCR 2 +#define CAM_CC_CCI_0_BCR 3 +#define CAM_CC_CCI_1_BCR 4 +#define CAM_CC_CPAS_BCR 5 +#define CAM_CC_CRE_BCR 6 +#define CAM_CC_CSI0PHY_BCR 7 +#define CAM_CC_CSI1PHY_BCR 8 +#define CAM_CC_CSI2PHY_BCR 9 +#define CAM_CC_CSI3PHY_BCR 10 +#define CAM_CC_ICP_BCR 11 +#define CAM_CC_MCLK0_BCR 12 +#define CAM_CC_MCLK1_BCR 13 +#define CAM_CC_MCLK2_BCR 14 +#define CAM_CC_MCLK3_BCR 15 +#define CAM_CC_MCLK4_BCR 16 +#define CAM_CC_OPE_0_BCR 17 +#define CAM_CC_TFE_0_BCR 18 +#define CAM_CC_TFE_1_BCR 19 +#define CAM_CC_TFE_2_BCR 20 + +/* CAM_CC power domains */ +#define CAM_CC_CAMSS_TOP_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,milos-dispcc.h b/include/dt-bindings/clock/qcom,milos-dispcc.h new file mode 100644 index 000000000000..c70f23f32f0a --- /dev/null +++ b/include/dt-bindings/clock/qcom,milos-dispcc.h @@ -0,0 +1,61 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H + +/* DISP_CC clocks */ +#define DISP_CC_PLL0 0 +#define DISP_CC_MDSS_ACCU_CLK 1 +#define DISP_CC_MDSS_AHB1_CLK 2 +#define DISP_CC_MDSS_AHB_CLK 3 +#define DISP_CC_MDSS_AHB_CLK_SRC 4 +#define DISP_CC_MDSS_BYTE0_CLK 5 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 6 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 7 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 8 +#define DISP_CC_MDSS_DPTX0_AUX_CLK 9 +#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 10 +#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 11 +#define DISP_CC_MDSS_DPTX0_LINK_CLK 12 +#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 13 +#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 14 +#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 15 +#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 16 +#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 17 +#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 18 +#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 19 +#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 20 +#define DISP_CC_MDSS_ESC0_CLK 21 +#define DISP_CC_MDSS_ESC0_CLK_SRC 22 +#define DISP_CC_MDSS_MDP1_CLK 23 +#define DISP_CC_MDSS_MDP_CLK 24 +#define DISP_CC_MDSS_MDP_CLK_SRC 25 +#define DISP_CC_MDSS_MDP_LUT1_CLK 26 +#define DISP_CC_MDSS_MDP_LUT_CLK 27 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 28 +#define DISP_CC_MDSS_PCLK0_CLK 29 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 30 +#define DISP_CC_MDSS_RSCC_AHB_CLK 31 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 32 +#define DISP_CC_MDSS_VSYNC1_CLK 33 +#define DISP_CC_MDSS_VSYNC_CLK 34 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 35 +#define DISP_CC_SLEEP_CLK 36 +#define DISP_CC_SLEEP_CLK_SRC 37 +#define DISP_CC_XO_CLK 38 +#define DISP_CC_XO_CLK_SRC 39 + +/* DISP_CC resets */ +#define DISP_CC_MDSS_CORE_BCR 0 +#define DISP_CC_MDSS_CORE_INT2_BCR 1 +#define DISP_CC_MDSS_RSCC_BCR 2 + +/* DISP_CC power domains */ +#define DISP_CC_MDSS_CORE_GDSC 0 +#define DISP_CC_MDSS_CORE_INT2_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,milos-gcc.h b/include/dt-bindings/clock/qcom,milos-gcc.h new file mode 100644 index 000000000000..a530ca39e1ef --- /dev/null +++ b/include/dt-bindings/clock/qcom,milos-gcc.h @@ -0,0 +1,210 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H +#define _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H + +/* GCC clocks */ +#define GCC_GPLL0 0 +#define GCC_GPLL0_OUT_EVEN 1 +#define GCC_GPLL2 2 +#define GCC_GPLL4 3 +#define GCC_GPLL6 4 +#define GCC_GPLL7 5 +#define GCC_GPLL9 6 +#define GCC_AGGRE_NOC_PCIE_AXI_CLK 7 +#define GCC_AGGRE_UFS_PHY_AXI_CLK 8 +#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 9 +#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10 +#define GCC_BOOT_ROM_AHB_CLK 11 +#define GCC_CAMERA_AHB_CLK 12 +#define GCC_CAMERA_HF_AXI_CLK 13 +#define GCC_CAMERA_HF_XO_CLK 14 +#define GCC_CAMERA_SF_AXI_CLK 15 +#define GCC_CAMERA_SF_XO_CLK 16 +#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 17 +#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 18 +#define GCC_CNOC_PCIE_SF_AXI_CLK 19 +#define GCC_DDRSS_GPU_AXI_CLK 20 +#define GCC_DDRSS_PCIE_SF_QTB_CLK 21 +#define GCC_DISP_AHB_CLK 22 +#define GCC_DISP_GPLL0_DIV_CLK_SRC 23 +#define GCC_DISP_HF_AXI_CLK 24 +#define GCC_DISP_XO_CLK 25 +#define GCC_GP1_CLK 26 +#define GCC_GP1_CLK_SRC 27 +#define GCC_GP2_CLK 28 +#define GCC_GP2_CLK_SRC 29 +#define GCC_GP3_CLK 30 +#define GCC_GP3_CLK_SRC 31 +#define GCC_GPU_CFG_AHB_CLK 32 +#define GCC_GPU_GPLL0_CLK_SRC 33 +#define GCC_GPU_GPLL0_DIV_CLK_SRC 34 +#define GCC_GPU_MEMNOC_GFX_CLK 35 +#define GCC_GPU_SNOC_DVM_GFX_CLK 36 +#define GCC_PCIE_0_AUX_CLK 37 +#define GCC_PCIE_0_AUX_CLK_SRC 38 +#define GCC_PCIE_0_CFG_AHB_CLK 39 +#define GCC_PCIE_0_MSTR_AXI_CLK 40 +#define GCC_PCIE_0_PHY_RCHNG_CLK 41 +#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42 +#define GCC_PCIE_0_PIPE_CLK 43 +#define GCC_PCIE_0_PIPE_CLK_SRC 44 +#define GCC_PCIE_0_PIPE_DIV2_CLK 45 +#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 46 +#define GCC_PCIE_0_SLV_AXI_CLK 47 +#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48 +#define GCC_PCIE_1_AUX_CLK 49 +#define GCC_PCIE_1_AUX_CLK_SRC 50 +#define GCC_PCIE_1_CFG_AHB_CLK 51 +#define GCC_PCIE_1_MSTR_AXI_CLK 52 +#define GCC_PCIE_1_PHY_RCHNG_CLK 53 +#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54 +#define GCC_PCIE_1_PIPE_CLK 55 +#define GCC_PCIE_1_PIPE_CLK_SRC 56 +#define GCC_PCIE_1_PIPE_DIV2_CLK 57 +#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 58 +#define GCC_PCIE_1_SLV_AXI_CLK 59 +#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 60 +#define GCC_PCIE_RSCC_CFG_AHB_CLK 61 +#define GCC_PCIE_RSCC_XO_CLK 62 +#define GCC_PDM2_CLK 63 +#define GCC_PDM2_CLK_SRC 64 +#define GCC_PDM_AHB_CLK 65 +#define GCC_PDM_XO4_CLK 66 +#define GCC_QMIP_CAMERA_NRT_AHB_CLK 67 +#define GCC_QMIP_CAMERA_RT_AHB_CLK 68 +#define GCC_QMIP_DISP_AHB_CLK 69 +#define GCC_QMIP_GPU_AHB_CLK 70 +#define GCC_QMIP_PCIE_AHB_CLK 71 +#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 72 +#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73 +#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 74 +#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 75 +#define GCC_QUPV3_WRAP0_CORE_2X_CLK 76 +#define GCC_QUPV3_WRAP0_CORE_CLK 77 +#define GCC_QUPV3_WRAP0_QSPI_REF_CLK 78 +#define GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC 79 +#define GCC_QUPV3_WRAP0_S0_CLK 80 +#define GCC_QUPV3_WRAP0_S0_CLK_SRC 81 +#define GCC_QUPV3_WRAP0_S1_CLK 82 +#define GCC_QUPV3_WRAP0_S1_CLK_SRC 83 +#define GCC_QUPV3_WRAP0_S2_CLK 84 +#define GCC_QUPV3_WRAP0_S2_CLK_SRC 85 +#define GCC_QUPV3_WRAP0_S3_CLK 86 +#define GCC_QUPV3_WRAP0_S3_CLK_SRC 87 +#define GCC_QUPV3_WRAP0_S4_CLK 88 +#define GCC_QUPV3_WRAP0_S4_CLK_SRC 89 +#define GCC_QUPV3_WRAP0_S5_CLK 90 +#define GCC_QUPV3_WRAP0_S5_CLK_SRC 91 +#define GCC_QUPV3_WRAP0_S6_CLK 92 +#define GCC_QUPV3_WRAP0_S6_CLK_SRC 93 +#define GCC_QUPV3_WRAP1_CORE_2X_CLK 94 +#define GCC_QUPV3_WRAP1_CORE_CLK 95 +#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 96 +#define 
GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 97 +#define GCC_QUPV3_WRAP1_S0_CLK 98 +#define GCC_QUPV3_WRAP1_S0_CLK_SRC 99 +#define GCC_QUPV3_WRAP1_S1_CLK 100 +#define GCC_QUPV3_WRAP1_S1_CLK_SRC 101 +#define GCC_QUPV3_WRAP1_S2_CLK 102 +#define GCC_QUPV3_WRAP1_S2_CLK_SRC 103 +#define GCC_QUPV3_WRAP1_S3_CLK 104 +#define GCC_QUPV3_WRAP1_S3_CLK_SRC 105 +#define GCC_QUPV3_WRAP1_S4_CLK 106 +#define GCC_QUPV3_WRAP1_S4_CLK_SRC 107 +#define GCC_QUPV3_WRAP1_S5_CLK 108 +#define GCC_QUPV3_WRAP1_S5_CLK_SRC 109 +#define GCC_QUPV3_WRAP1_S6_CLK 110 +#define GCC_QUPV3_WRAP1_S6_CLK_SRC 111 +#define GCC_QUPV3_WRAP_0_M_AHB_CLK 112 +#define GCC_QUPV3_WRAP_0_S_AHB_CLK 113 +#define GCC_QUPV3_WRAP_1_M_AHB_CLK 114 +#define GCC_QUPV3_WRAP_1_S_AHB_CLK 115 +#define GCC_SDCC1_AHB_CLK 116 +#define GCC_SDCC1_APPS_CLK 117 +#define GCC_SDCC1_APPS_CLK_SRC 118 +#define GCC_SDCC1_ICE_CORE_CLK 119 +#define GCC_SDCC1_ICE_CORE_CLK_SRC 120 +#define GCC_SDCC2_AHB_CLK 121 +#define GCC_SDCC2_APPS_CLK 122 +#define GCC_SDCC2_APPS_CLK_SRC 123 +#define GCC_UFS_PHY_AHB_CLK 124 +#define GCC_UFS_PHY_AXI_CLK 125 +#define GCC_UFS_PHY_AXI_CLK_SRC 126 +#define GCC_UFS_PHY_AXI_HW_CTL_CLK 127 +#define GCC_UFS_PHY_ICE_CORE_CLK 128 +#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129 +#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 130 +#define GCC_UFS_PHY_PHY_AUX_CLK 131 +#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 132 +#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 133 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 134 +#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 135 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 136 +#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 137 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138 +#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 139 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK 140 +#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 141 +#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 142 +#define GCC_USB30_PRIM_ATB_CLK 143 +#define GCC_USB30_PRIM_MASTER_CLK 144 +#define GCC_USB30_PRIM_MASTER_CLK_SRC 145 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146 +#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147 +#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 148 +#define GCC_USB30_PRIM_SLEEP_CLK 149 +#define GCC_USB3_PRIM_PHY_AUX_CLK 150 +#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 151 +#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 152 +#define GCC_USB3_PRIM_PHY_PIPE_CLK 153 +#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 154 +#define GCC_VIDEO_AHB_CLK 155 +#define GCC_VIDEO_AXI0_CLK 156 +#define GCC_VIDEO_XO_CLK 157 + +/* GCC resets */ +#define GCC_CAMERA_BCR 0 +#define GCC_DISPLAY_BCR 1 +#define GCC_GPU_BCR 2 +#define GCC_PCIE_0_BCR 3 +#define GCC_PCIE_0_LINK_DOWN_BCR 4 +#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5 +#define GCC_PCIE_0_PHY_BCR 6 +#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7 +#define GCC_PCIE_1_BCR 8 +#define GCC_PCIE_1_LINK_DOWN_BCR 9 +#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10 +#define GCC_PCIE_1_PHY_BCR 11 +#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12 +#define GCC_PCIE_RSCC_BCR 13 +#define GCC_PDM_BCR 14 +#define GCC_QUPV3_WRAPPER_0_BCR 15 +#define GCC_QUPV3_WRAPPER_1_BCR 16 +#define GCC_QUSB2PHY_PRIM_BCR 17 +#define GCC_QUSB2PHY_SEC_BCR 18 +#define GCC_SDCC1_BCR 19 +#define GCC_SDCC2_BCR 20 +#define GCC_UFS_PHY_BCR 21 +#define GCC_USB30_PRIM_BCR 22 +#define GCC_USB3_DP_PHY_PRIM_BCR 23 +#define GCC_USB3_PHY_PRIM_BCR 24 +#define GCC_USB3PHY_PHY_PRIM_BCR 25 +#define GCC_VIDEO_AXI0_CLK_ARES 26 +#define GCC_VIDEO_BCR 27 + +/* GCC power domains */ +#define PCIE_0_GDSC 0 +#define PCIE_0_PHY_GDSC 1 +#define PCIE_1_GDSC 2 +#define PCIE_1_PHY_GDSC 3 +#define UFS_PHY_GDSC 4 +#define UFS_MEM_PHY_GDSC 5 +#define USB30_PRIM_GDSC 6 +#define USB3_PHY_GDSC 7 + +#endif diff 
--git a/include/dt-bindings/clock/qcom,milos-gpucc.h b/include/dt-bindings/clock/qcom,milos-gpucc.h new file mode 100644 index 000000000000..6ff1925d409f --- /dev/null +++ b/include/dt-bindings/clock/qcom,milos-gpucc.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H + +/* GPU_CC clocks */ +#define GPU_CC_PLL0 0 +#define GPU_CC_PLL0_OUT_EVEN 1 +#define GPU_CC_AHB_CLK 2 +#define GPU_CC_CB_CLK 3 +#define GPU_CC_CX_ACCU_SHIFT_CLK 4 +#define GPU_CC_CX_FF_CLK 5 +#define GPU_CC_CX_GMU_CLK 6 +#define GPU_CC_CXO_AON_CLK 7 +#define GPU_CC_CXO_CLK 8 +#define GPU_CC_DEMET_CLK 9 +#define GPU_CC_DEMET_DIV_CLK_SRC 10 +#define GPU_CC_DPM_CLK 11 +#define GPU_CC_FF_CLK_SRC 12 +#define GPU_CC_FREQ_MEASURE_CLK 13 +#define GPU_CC_GMU_CLK_SRC 14 +#define GPU_CC_GX_ACCU_SHIFT_CLK 15 +#define GPU_CC_GX_ACD_AHB_FF_CLK 16 +#define GPU_CC_GX_AHB_FF_CLK 17 +#define GPU_CC_GX_GMU_CLK 18 +#define GPU_CC_GX_RCG_AHB_FF_CLK 19 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 20 +#define GPU_CC_HUB_AON_CLK 21 +#define GPU_CC_HUB_CLK_SRC 22 +#define GPU_CC_HUB_CX_INT_CLK 23 +#define GPU_CC_HUB_DIV_CLK_SRC 24 +#define GPU_CC_MEMNOC_GFX_CLK 25 +#define GPU_CC_RSCC_HUB_AON_CLK 26 +#define GPU_CC_RSCC_XO_AON_CLK 27 +#define GPU_CC_SLEEP_CLK 28 +#define GPU_CC_XO_CLK_SRC 29 +#define GPU_CC_XO_DIV_CLK_SRC 30 + +/* GPU_CC resets */ +#define GPU_CC_CB_BCR 0 +#define GPU_CC_CX_BCR 1 +#define GPU_CC_FAST_HUB_BCR 2 +#define GPU_CC_FF_BCR 3 +#define GPU_CC_GMU_BCR 4 +#define GPU_CC_GX_BCR 5 +#define GPU_CC_RBCPR_BCR 6 +#define GPU_CC_XO_BCR 7 + +/* GPU_CC power domains */ +#define GPU_CC_CX_GDSC 0 + +#endif diff --git a/include/dt-bindings/clock/qcom,milos-videocc.h b/include/dt-bindings/clock/qcom,milos-videocc.h new file mode 100644 index 000000000000..3544db81ffae --- /dev/null +++ b/include/dt-bindings/clock/qcom,milos-videocc.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com> + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H + +/* VIDEO_CC clocks */ +#define VIDEO_CC_PLL0 0 +#define VIDEO_CC_AHB_CLK 1 +#define VIDEO_CC_AHB_CLK_SRC 2 +#define VIDEO_CC_MVS0_CLK 3 +#define VIDEO_CC_MVS0_CLK_SRC 4 +#define VIDEO_CC_MVS0_DIV_CLK_SRC 5 +#define VIDEO_CC_MVS0_SHIFT_CLK 6 +#define VIDEO_CC_MVS0C_CLK 7 +#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8 +#define VIDEO_CC_MVS0C_SHIFT_CLK 9 +#define VIDEO_CC_SLEEP_CLK 10 +#define VIDEO_CC_SLEEP_CLK_SRC 11 +#define VIDEO_CC_XO_CLK 12 +#define VIDEO_CC_XO_CLK_SRC 13 + +/* VIDEO_CC resets */ +#define VIDEO_CC_INTERFACE_BCR 0 +#define VIDEO_CC_MVS0_BCR 1 +#define VIDEO_CC_MVS0C_CLK_ARES 2 +#define VIDEO_CC_MVS0C_BCR 3 + +/* VIDEO_CC power domains */ +#define VIDEO_CC_MVS0_GDSC 0 +#define VIDEO_CC_MVS0C_GDSC 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,qcs615-camcc.h b/include/dt-bindings/clock/qcom,qcs615-camcc.h new file mode 100644 index 000000000000..aec57dddc067 --- /dev/null +++ b/include/dt-bindings/clock/qcom,qcs615-camcc.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H +#define _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H + +/* CAM_CC clocks */ +#define CAM_CC_BPS_AHB_CLK 0 +#define CAM_CC_BPS_AREG_CLK 1 +#define CAM_CC_BPS_AXI_CLK 2 +#define CAM_CC_BPS_CLK 3 +#define CAM_CC_BPS_CLK_SRC 4 +#define CAM_CC_CAMNOC_ATB_CLK 5 +#define CAM_CC_CAMNOC_AXI_CLK 6 +#define CAM_CC_CCI_CLK 7 +#define CAM_CC_CCI_CLK_SRC 8 +#define CAM_CC_CORE_AHB_CLK 9 +#define CAM_CC_CPAS_AHB_CLK 10 +#define CAM_CC_CPHY_RX_CLK_SRC 11 +#define CAM_CC_CSI0PHYTIMER_CLK 12 +#define CAM_CC_CSI0PHYTIMER_CLK_SRC 13 +#define CAM_CC_CSI1PHYTIMER_CLK 14 +#define CAM_CC_CSI1PHYTIMER_CLK_SRC 15 +#define CAM_CC_CSI2PHYTIMER_CLK 16 +#define CAM_CC_CSI2PHYTIMER_CLK_SRC 17 +#define CAM_CC_CSIPHY0_CLK 18 +#define CAM_CC_CSIPHY1_CLK 19 +#define CAM_CC_CSIPHY2_CLK 20 +#define CAM_CC_FAST_AHB_CLK_SRC 21 +#define CAM_CC_ICP_ATB_CLK 22 +#define CAM_CC_ICP_CLK 23 +#define CAM_CC_ICP_CLK_SRC 24 +#define CAM_CC_ICP_CTI_CLK 25 +#define CAM_CC_ICP_TS_CLK 26 +#define CAM_CC_IFE_0_AXI_CLK 27 +#define CAM_CC_IFE_0_CLK 28 +#define CAM_CC_IFE_0_CLK_SRC 29 +#define CAM_CC_IFE_0_CPHY_RX_CLK 30 +#define CAM_CC_IFE_0_CSID_CLK 31 +#define CAM_CC_IFE_0_CSID_CLK_SRC 32 +#define CAM_CC_IFE_0_DSP_CLK 33 +#define CAM_CC_IFE_1_AXI_CLK 34 +#define CAM_CC_IFE_1_CLK 35 +#define CAM_CC_IFE_1_CLK_SRC 36 +#define CAM_CC_IFE_1_CPHY_RX_CLK 37 +#define CAM_CC_IFE_1_CSID_CLK 38 +#define CAM_CC_IFE_1_CSID_CLK_SRC 39 +#define CAM_CC_IFE_1_DSP_CLK 40 +#define CAM_CC_IFE_LITE_CLK 41 +#define CAM_CC_IFE_LITE_CLK_SRC 42 +#define CAM_CC_IFE_LITE_CPHY_RX_CLK 43 +#define CAM_CC_IFE_LITE_CSID_CLK 44 +#define CAM_CC_IFE_LITE_CSID_CLK_SRC 45 +#define CAM_CC_IPE_0_AHB_CLK 46 +#define CAM_CC_IPE_0_AREG_CLK 47 +#define CAM_CC_IPE_0_AXI_CLK 48 +#define CAM_CC_IPE_0_CLK 49 +#define CAM_CC_IPE_0_CLK_SRC 50 +#define CAM_CC_JPEG_CLK 51 +#define CAM_CC_JPEG_CLK_SRC 52 +#define CAM_CC_LRME_CLK 53 +#define CAM_CC_LRME_CLK_SRC 54 +#define CAM_CC_MCLK0_CLK 55 +#define CAM_CC_MCLK0_CLK_SRC 56 +#define CAM_CC_MCLK1_CLK 57 +#define CAM_CC_MCLK1_CLK_SRC 58 +#define CAM_CC_MCLK2_CLK 59 +#define CAM_CC_MCLK2_CLK_SRC 60 +#define CAM_CC_MCLK3_CLK 61 +#define CAM_CC_MCLK3_CLK_SRC 62 +#define CAM_CC_PLL0 63 +#define CAM_CC_PLL1 64 +#define CAM_CC_PLL2 65 +#define CAM_CC_PLL2_OUT_AUX2 66 +#define CAM_CC_PLL3 67 +#define CAM_CC_SLOW_AHB_CLK_SRC 68 +#define CAM_CC_SOC_AHB_CLK 69 +#define CAM_CC_SYS_TMR_CLK 70 + +/* CAM_CC power domains */ +#define BPS_GDSC 0 +#define IFE_0_GDSC 1 +#define IFE_1_GDSC 2 +#define IPE_0_GDSC 3 +#define TITAN_TOP_GDSC 4 + +/* CAM_CC resets */ +#define CAM_CC_BPS_BCR 0 +#define CAM_CC_CAMNOC_BCR 1 +#define CAM_CC_CCI_BCR 2 +#define CAM_CC_CPAS_BCR 3 +#define CAM_CC_CSI0PHY_BCR 4 +#define CAM_CC_CSI1PHY_BCR 5 +#define CAM_CC_CSI2PHY_BCR 6 +#define CAM_CC_ICP_BCR 7 +#define CAM_CC_IFE_0_BCR 8 +#define CAM_CC_IFE_1_BCR 9 +#define CAM_CC_IFE_LITE_BCR 10 +#define CAM_CC_IPE_0_BCR 11 +#define CAM_CC_JPEG_BCR 12 +#define CAM_CC_LRME_BCR 13 +#define CAM_CC_MCLK0_BCR 14 +#define CAM_CC_MCLK1_BCR 15 +#define CAM_CC_MCLK2_BCR 16 +#define CAM_CC_MCLK3_BCR 17 +#define CAM_CC_TITAN_TOP_BCR 18 + +#endif diff --git a/include/dt-bindings/clock/qcom,qcs615-dispcc.h b/include/dt-bindings/clock/qcom,qcs615-dispcc.h new file mode 100644 index 000000000000..9a29945c5762 --- /dev/null +++ b/include/dt-bindings/clock/qcom,qcs615-dispcc.h @@ -0,0 +1,52 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H +#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H + +/* DISP_CC clocks */ +#define DISP_CC_MDSS_AHB_CLK 0 +#define DISP_CC_MDSS_AHB_CLK_SRC 1 +#define DISP_CC_MDSS_BYTE0_CLK 2 +#define DISP_CC_MDSS_BYTE0_CLK_SRC 3 +#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4 +#define DISP_CC_MDSS_BYTE0_INTF_CLK 5 +#define DISP_CC_MDSS_DP_AUX_CLK 6 +#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7 +#define DISP_CC_MDSS_DP_CRYPTO_CLK 8 +#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9 +#define DISP_CC_MDSS_DP_LINK_CLK 10 +#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11 +#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 12 +#define DISP_CC_MDSS_DP_LINK_INTF_CLK 13 +#define DISP_CC_MDSS_DP_PIXEL1_CLK 14 +#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 15 +#define DISP_CC_MDSS_DP_PIXEL_CLK 16 +#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 17 +#define DISP_CC_MDSS_ESC0_CLK 18 +#define DISP_CC_MDSS_ESC0_CLK_SRC 19 +#define DISP_CC_MDSS_MDP_CLK 20 +#define DISP_CC_MDSS_MDP_CLK_SRC 21 +#define DISP_CC_MDSS_MDP_LUT_CLK 22 +#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 23 +#define DISP_CC_MDSS_PCLK0_CLK 24 +#define DISP_CC_MDSS_PCLK0_CLK_SRC 25 +#define DISP_CC_MDSS_ROT_CLK 26 +#define DISP_CC_MDSS_ROT_CLK_SRC 27 +#define DISP_CC_MDSS_RSCC_AHB_CLK 28 +#define DISP_CC_MDSS_RSCC_VSYNC_CLK 29 +#define DISP_CC_MDSS_VSYNC_CLK 30 +#define DISP_CC_MDSS_VSYNC_CLK_SRC 31 +#define DISP_CC_PLL0 32 +#define DISP_CC_XO_CLK 33 + +/* DISP_CC power domains */ +#define MDSS_CORE_GDSC 0 + +/* DISP_CC resets */ +#define DISP_CC_MDSS_CORE_BCR 0 +#define DISP_CC_MDSS_RSCC_BCR 1 + +#endif diff --git a/include/dt-bindings/clock/qcom,qcs615-gpucc.h b/include/dt-bindings/clock/qcom,qcs615-gpucc.h new file mode 100644 index 000000000000..6d8394b90d59 --- /dev/null +++ b/include/dt-bindings/clock/qcom,qcs615-gpucc.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H +#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H + +/* GPU_CC clocks */ +#define CRC_DIV_PLL0 0 +#define CRC_DIV_PLL1 1 +#define GPU_CC_PLL0 2 +#define GPU_CC_PLL1 3 +#define GPU_CC_CRC_AHB_CLK 4 +#define GPU_CC_CX_GFX3D_CLK 5 +#define GPU_CC_CX_GFX3D_SLV_CLK 6 +#define GPU_CC_CX_GMU_CLK 7 +#define GPU_CC_CX_SNOC_DVM_CLK 8 +#define GPU_CC_CXO_AON_CLK 9 +#define GPU_CC_CXO_CLK 10 +#define GPU_CC_GMU_CLK_SRC 11 +#define GPU_CC_GX_GFX3D_CLK 12 +#define GPU_CC_GX_GFX3D_CLK_SRC 13 +#define GPU_CC_GX_GMU_CLK 14 +#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 15 +#define GPU_CC_SLEEP_CLK 16 + +/* GPU_CC power domains */ +#define CX_GDSC 0 +#define GX_GDSC 1 + +/* GPU_CC resets */ +#define GPU_CC_CX_BCR 0 +#define GPU_CC_GFX3D_AON_BCR 1 +#define GPU_CC_GMU_BCR 2 +#define GPU_CC_GX_BCR 3 +#define GPU_CC_XO_BCR 4 + +#endif diff --git a/include/dt-bindings/clock/qcom,qcs615-videocc.h b/include/dt-bindings/clock/qcom,qcs615-videocc.h new file mode 100644 index 000000000000..0ca3efb21103 --- /dev/null +++ b/include/dt-bindings/clock/qcom,qcs615-videocc.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* + * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved. 
+ */ + +#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H +#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H + +/* VIDEO_CC clocks */ +#define VIDEO_CC_SLEEP_CLK 0 +#define VIDEO_CC_SLEEP_CLK_SRC 1 +#define VIDEO_CC_VCODEC0_AXI_CLK 2 +#define VIDEO_CC_VCODEC0_CORE_CLK 3 +#define VIDEO_CC_VENUS_AHB_CLK 4 +#define VIDEO_CC_VENUS_CLK_SRC 5 +#define VIDEO_CC_VENUS_CTL_AXI_CLK 6 +#define VIDEO_CC_VENUS_CTL_CORE_CLK 7 +#define VIDEO_CC_XO_CLK 8 +#define VIDEO_PLL0 9 + +/* VIDEO_CC power domains */ +#define VCODEC0_GDSC 0 +#define VENUS_GDSC 1 + +/* VIDEO_CC resets */ +#define VIDEO_CC_INTERFACE_BCR 0 +#define VIDEO_CC_VCODEC0_BCR 1 +#define VIDEO_CC_VENUS_BCR 2 + +#endif diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h index 24ba9e2a5cf6..710c340f24a5 100644 --- a/include/dt-bindings/clock/qcom,x1e80100-gcc.h +++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h @@ -482,4 +482,6 @@ #define GCC_USB_1_PHY_BCR 85 #define GCC_USB_2_PHY_BCR 86 #define GCC_VIDEO_BCR 87 +#define GCC_VIDEO_AXI0_CLK_ARES 88 +#define GCC_VIDEO_AXI1_CLK_ARES 89 #endif diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h index 131993343777..e1f65f1928cf 100644 --- a/include/dt-bindings/clock/r9a07g043-cpg.h +++ b/include/dt-bindings/clock/r9a07g043-cpg.h @@ -200,57 +200,4 @@ #define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */ #define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */ -/* Power domain IDs. */ -#define R9A07G043_PD_ALWAYS_ON 0 -#define R9A07G043_PD_GIC 1 /* RZ/G2UL Only */ -#define R9A07G043_PD_IA55 2 /* RZ/G2UL Only */ -#define R9A07G043_PD_MHU 3 /* RZ/G2UL Only */ -#define R9A07G043_PD_CORESIGHT 4 /* RZ/G2UL Only */ -#define R9A07G043_PD_SYC 5 /* RZ/G2UL Only */ -#define R9A07G043_PD_DMAC 6 -#define R9A07G043_PD_GTM0 7 -#define R9A07G043_PD_GTM1 8 -#define R9A07G043_PD_GTM2 9 -#define R9A07G043_PD_MTU 10 -#define R9A07G043_PD_POE3 11 -#define R9A07G043_PD_WDT0 12 -#define R9A07G043_PD_SPI 13 -#define R9A07G043_PD_SDHI0 14 -#define R9A07G043_PD_SDHI1 15 -#define R9A07G043_PD_ISU 16 /* RZ/G2UL Only */ -#define R9A07G043_PD_CRU 17 /* RZ/G2UL Only */ -#define R9A07G043_PD_LCDC 18 /* RZ/G2UL Only */ -#define R9A07G043_PD_SSI0 19 -#define R9A07G043_PD_SSI1 20 -#define R9A07G043_PD_SSI2 21 -#define R9A07G043_PD_SSI3 22 -#define R9A07G043_PD_SRC 23 -#define R9A07G043_PD_USB0 24 -#define R9A07G043_PD_USB1 25 -#define R9A07G043_PD_USB_PHY 26 -#define R9A07G043_PD_ETHER0 27 -#define R9A07G043_PD_ETHER1 28 -#define R9A07G043_PD_I2C0 29 -#define R9A07G043_PD_I2C1 30 -#define R9A07G043_PD_I2C2 31 -#define R9A07G043_PD_I2C3 32 -#define R9A07G043_PD_SCIF0 33 -#define R9A07G043_PD_SCIF1 34 -#define R9A07G043_PD_SCIF2 35 -#define R9A07G043_PD_SCIF3 36 -#define R9A07G043_PD_SCIF4 37 -#define R9A07G043_PD_SCI0 38 -#define R9A07G043_PD_SCI1 39 -#define R9A07G043_PD_IRDA 40 -#define R9A07G043_PD_RSPI0 41 -#define R9A07G043_PD_RSPI1 42 -#define R9A07G043_PD_RSPI2 43 -#define R9A07G043_PD_CANFD 44 -#define R9A07G043_PD_ADC 45 -#define R9A07G043_PD_TSU 46 -#define R9A07G043_PD_PLIC 47 /* RZ/Five Only */ -#define R9A07G043_PD_IAX45 48 /* RZ/Five Only */ -#define R9A07G043_PD_NCEPLDM 49 /* RZ/Five Only */ -#define R9A07G043_PD_NCEPLMT 50 /* RZ/Five Only */ - #endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h index e209f96f92b7..0bb17ff1a01a 100644 --- a/include/dt-bindings/clock/r9a07g044-cpg.h +++ 
b/include/dt-bindings/clock/r9a07g044-cpg.h @@ -217,62 +217,4 @@ #define R9A07G044_ADC_ADRST_N 82 #define R9A07G044_TSU_PRESETN 83 -/* Power domain IDs. */ -#define R9A07G044_PD_ALWAYS_ON 0 -#define R9A07G044_PD_GIC 1 -#define R9A07G044_PD_IA55 2 -#define R9A07G044_PD_MHU 3 -#define R9A07G044_PD_CORESIGHT 4 -#define R9A07G044_PD_SYC 5 -#define R9A07G044_PD_DMAC 6 -#define R9A07G044_PD_GTM0 7 -#define R9A07G044_PD_GTM1 8 -#define R9A07G044_PD_GTM2 9 -#define R9A07G044_PD_MTU 10 -#define R9A07G044_PD_POE3 11 -#define R9A07G044_PD_GPT 12 -#define R9A07G044_PD_POEGA 13 -#define R9A07G044_PD_POEGB 14 -#define R9A07G044_PD_POEGC 15 -#define R9A07G044_PD_POEGD 16 -#define R9A07G044_PD_WDT0 17 -#define R9A07G044_PD_WDT1 18 -#define R9A07G044_PD_SPI 19 -#define R9A07G044_PD_SDHI0 20 -#define R9A07G044_PD_SDHI1 21 -#define R9A07G044_PD_3DGE 22 -#define R9A07G044_PD_ISU 23 -#define R9A07G044_PD_VCPL4 24 -#define R9A07G044_PD_CRU 25 -#define R9A07G044_PD_MIPI_DSI 26 -#define R9A07G044_PD_LCDC 27 -#define R9A07G044_PD_SSI0 28 -#define R9A07G044_PD_SSI1 29 -#define R9A07G044_PD_SSI2 30 -#define R9A07G044_PD_SSI3 31 -#define R9A07G044_PD_SRC 32 -#define R9A07G044_PD_USB0 33 -#define R9A07G044_PD_USB1 34 -#define R9A07G044_PD_USB_PHY 35 -#define R9A07G044_PD_ETHER0 36 -#define R9A07G044_PD_ETHER1 37 -#define R9A07G044_PD_I2C0 38 -#define R9A07G044_PD_I2C1 39 -#define R9A07G044_PD_I2C2 40 -#define R9A07G044_PD_I2C3 41 -#define R9A07G044_PD_SCIF0 42 -#define R9A07G044_PD_SCIF1 43 -#define R9A07G044_PD_SCIF2 44 -#define R9A07G044_PD_SCIF3 45 -#define R9A07G044_PD_SCIF4 46 -#define R9A07G044_PD_SCI0 47 -#define R9A07G044_PD_SCI1 48 -#define R9A07G044_PD_IRDA 49 -#define R9A07G044_PD_RSPI0 50 -#define R9A07G044_PD_RSPI1 51 -#define R9A07G044_PD_RSPI2 52 -#define R9A07G044_PD_CANFD 53 -#define R9A07G044_PD_ADC 54 -#define R9A07G044_PD_TSU 55 - #endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h index 2c99f89397c4..43f4dbda872c 100644 --- a/include/dt-bindings/clock/r9a07g054-cpg.h +++ b/include/dt-bindings/clock/r9a07g054-cpg.h @@ -226,62 +226,4 @@ #define R9A07G054_TSU_PRESETN 83 #define R9A07G054_STPAI_ARESETN 84 -/* Power domain IDs. 
*/ -#define R9A07G054_PD_ALWAYS_ON 0 -#define R9A07G054_PD_GIC 1 -#define R9A07G054_PD_IA55 2 -#define R9A07G054_PD_MHU 3 -#define R9A07G054_PD_CORESIGHT 4 -#define R9A07G054_PD_SYC 5 -#define R9A07G054_PD_DMAC 6 -#define R9A07G054_PD_GTM0 7 -#define R9A07G054_PD_GTM1 8 -#define R9A07G054_PD_GTM2 9 -#define R9A07G054_PD_MTU 10 -#define R9A07G054_PD_POE3 11 -#define R9A07G054_PD_GPT 12 -#define R9A07G054_PD_POEGA 13 -#define R9A07G054_PD_POEGB 14 -#define R9A07G054_PD_POEGC 15 -#define R9A07G054_PD_POEGD 16 -#define R9A07G054_PD_WDT0 17 -#define R9A07G054_PD_WDT1 18 -#define R9A07G054_PD_SPI 19 -#define R9A07G054_PD_SDHI0 20 -#define R9A07G054_PD_SDHI1 21 -#define R9A07G054_PD_3DGE 22 -#define R9A07G054_PD_ISU 23 -#define R9A07G054_PD_VCPL4 24 -#define R9A07G054_PD_CRU 25 -#define R9A07G054_PD_MIPI_DSI 26 -#define R9A07G054_PD_LCDC 27 -#define R9A07G054_PD_SSI0 28 -#define R9A07G054_PD_SSI1 29 -#define R9A07G054_PD_SSI2 30 -#define R9A07G054_PD_SSI3 31 -#define R9A07G054_PD_SRC 32 -#define R9A07G054_PD_USB0 33 -#define R9A07G054_PD_USB1 34 -#define R9A07G054_PD_USB_PHY 35 -#define R9A07G054_PD_ETHER0 36 -#define R9A07G054_PD_ETHER1 37 -#define R9A07G054_PD_I2C0 38 -#define R9A07G054_PD_I2C1 39 -#define R9A07G054_PD_I2C2 40 -#define R9A07G054_PD_I2C3 41 -#define R9A07G054_PD_SCIF0 42 -#define R9A07G054_PD_SCIF1 43 -#define R9A07G054_PD_SCIF2 44 -#define R9A07G054_PD_SCIF3 45 -#define R9A07G054_PD_SCIF4 46 -#define R9A07G054_PD_SCI0 47 -#define R9A07G054_PD_SCI1 48 -#define R9A07G054_PD_IRDA 49 -#define R9A07G054_PD_RSPI0 50 -#define R9A07G054_PD_RSPI1 51 -#define R9A07G054_PD_RSPI2 52 -#define R9A07G054_PD_CANFD 53 -#define R9A07G054_PD_ADC 54 -#define R9A07G054_PD_TSU 55 - #endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */ diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h index 311521fe4b59..410725b778a8 100644 --- a/include/dt-bindings/clock/r9a08g045-cpg.h +++ b/include/dt-bindings/clock/r9a08g045-cpg.h @@ -239,75 +239,4 @@ #define R9A08G045_I3C_PRESETN 92 #define R9A08G045_VBAT_BRESETN 93 -/* Power domain IDs. 
*/ -#define R9A08G045_PD_ALWAYS_ON 0 -#define R9A08G045_PD_GIC 1 -#define R9A08G045_PD_IA55 2 -#define R9A08G045_PD_MHU 3 -#define R9A08G045_PD_CORESIGHT 4 -#define R9A08G045_PD_SYC 5 -#define R9A08G045_PD_DMAC 6 -#define R9A08G045_PD_GTM0 7 -#define R9A08G045_PD_GTM1 8 -#define R9A08G045_PD_GTM2 9 -#define R9A08G045_PD_GTM3 10 -#define R9A08G045_PD_GTM4 11 -#define R9A08G045_PD_GTM5 12 -#define R9A08G045_PD_GTM6 13 -#define R9A08G045_PD_GTM7 14 -#define R9A08G045_PD_MTU 15 -#define R9A08G045_PD_POE3 16 -#define R9A08G045_PD_GPT 17 -#define R9A08G045_PD_POEGA 18 -#define R9A08G045_PD_POEGB 19 -#define R9A08G045_PD_POEGC 20 -#define R9A08G045_PD_POEGD 21 -#define R9A08G045_PD_WDT0 22 -#define R9A08G045_PD_XSPI 23 -#define R9A08G045_PD_SDHI0 24 -#define R9A08G045_PD_SDHI1 25 -#define R9A08G045_PD_SDHI2 26 -#define R9A08G045_PD_SSI0 27 -#define R9A08G045_PD_SSI1 28 -#define R9A08G045_PD_SSI2 29 -#define R9A08G045_PD_SSI3 30 -#define R9A08G045_PD_SRC 31 -#define R9A08G045_PD_USB0 32 -#define R9A08G045_PD_USB1 33 -#define R9A08G045_PD_USB_PHY 34 -#define R9A08G045_PD_ETHER0 35 -#define R9A08G045_PD_ETHER1 36 -#define R9A08G045_PD_I2C0 37 -#define R9A08G045_PD_I2C1 38 -#define R9A08G045_PD_I2C2 39 -#define R9A08G045_PD_I2C3 40 -#define R9A08G045_PD_SCIF0 41 -#define R9A08G045_PD_SCIF1 42 -#define R9A08G045_PD_SCIF2 43 -#define R9A08G045_PD_SCIF3 44 -#define R9A08G045_PD_SCIF4 45 -#define R9A08G045_PD_SCIF5 46 -#define R9A08G045_PD_SCI0 47 -#define R9A08G045_PD_SCI1 48 -#define R9A08G045_PD_IRDA 49 -#define R9A08G045_PD_RSPI0 50 -#define R9A08G045_PD_RSPI1 51 -#define R9A08G045_PD_RSPI2 52 -#define R9A08G045_PD_RSPI3 53 -#define R9A08G045_PD_RSPI4 54 -#define R9A08G045_PD_CANFD 55 -#define R9A08G045_PD_ADC 56 -#define R9A08G045_PD_TSU 57 -#define R9A08G045_PD_OCTA 58 -#define R9A08G045_PD_PDM 59 -#define R9A08G045_PD_PCI 60 -#define R9A08G045_PD_SPDIF 61 -#define R9A08G045_PD_I3C 62 -#define R9A08G045_PD_VBAT 63 - -#define R9A08G045_PD_DDR 64 -#define R9A08G045_PD_TZCDDR 65 -#define R9A08G045_PD_OTFDE_DDR 66 -#define R9A08G045_PD_RTC 67 - #endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */ diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h new file mode 100644 index 000000000000..7ecc4f0b235a --- /dev/null +++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2025 Renesas Electronics Corp. 
+ */ + +#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ +#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A09G077 CPG Core Clocks */ +#define R9A09G077_CLK_CA55C0 0 +#define R9A09G077_CLK_CA55C1 1 +#define R9A09G077_CLK_CA55C2 2 +#define R9A09G077_CLK_CA55C3 3 +#define R9A09G077_CLK_CA55S 4 +#define R9A09G077_CLK_CR52_CPU0 5 +#define R9A09G077_CLK_CR52_CPU1 6 +#define R9A09G077_CLK_CKIO 7 +#define R9A09G077_CLK_PCLKAH 8 +#define R9A09G077_CLK_PCLKAM 9 +#define R9A09G077_CLK_PCLKAL 10 +#define R9A09G077_CLK_PCLKGPTL 11 +#define R9A09G077_CLK_PCLKH 12 +#define R9A09G077_CLK_PCLKM 13 +#define R9A09G077_CLK_PCLKL 14 +#define R9A09G077_SDHI_CLKHS 15 + +#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */ diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h new file mode 100644 index 000000000000..925e57703925 --- /dev/null +++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) + * + * Copyright (C) 2025 Renesas Electronics Corp. + */ + +#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ +#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ + +#include <dt-bindings/clock/renesas-cpg-mssr.h> + +/* R9A09G087 CPG Core Clocks */ +#define R9A09G087_CLK_CA55C0 0 +#define R9A09G087_CLK_CA55C1 1 +#define R9A09G087_CLK_CA55C2 2 +#define R9A09G087_CLK_CA55C3 3 +#define R9A09G087_CLK_CA55S 4 +#define R9A09G087_CLK_CR52_CPU0 5 +#define R9A09G087_CLK_CR52_CPU1 6 +#define R9A09G087_CLK_CKIO 7 +#define R9A09G087_CLK_PCLKAH 8 +#define R9A09G087_CLK_PCLKAM 9 +#define R9A09G087_CLK_PCLKAL 10 +#define R9A09G087_CLK_PCLKGPTL 11 +#define R9A09G087_CLK_PCLKH 12 +#define R9A09G087_CLK_PCLKM 13 +#define R9A09G087_CLK_PCLKL 14 +#define R9A09G087_SDHI_CLKHS 15 + +#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */ diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h index 5e6896e9627f..93e6233d1358 100644 --- a/include/dt-bindings/clock/samsung,exynosautov920.h +++ b/include/dt-bindings/clock/samsung,exynosautov920.h @@ -286,4 +286,13 @@ #define CLK_MOUT_HSI1_USBDRD_USER 3 #define CLK_MOUT_HSI1_USBDRD 4 +/* CMU_HSI2 */ +#define FOUT_PLL_ETH 1 +#define CLK_MOUT_HSI2_NOC_UFS_USER 2 +#define CLK_MOUT_HSI2_UFS_EMBD_USER 3 +#define CLK_MOUT_HSI2_ETHERNET 4 +#define CLK_MOUT_HSI2_ETHERNET_USER 5 +#define CLK_DOUT_HSI2_ETHERNET 6 +#define CLK_DOUT_HSI2_ETHERNET_PTP 7 + #endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */ diff --git a/include/dt-bindings/pinctrl/stm32-pinfunc.h b/include/dt-bindings/pinctrl/stm32-pinfunc.h index 28ad0235086a..af3fd388329a 100644 --- a/include/dt-bindings/pinctrl/stm32-pinfunc.h +++ b/include/dt-bindings/pinctrl/stm32-pinfunc.h @@ -26,6 +26,7 @@ #define AF14 0xf #define AF15 0x10 #define ANALOG 0x11 +#define RSVD 0x12 /* define Pins number*/ #define PIN_NO(port, line) (((port) - 'A') * 0x10 + (line)) diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h new file mode 100644 index 000000000000..f64f4ad4beda --- /dev/null +++ b/include/linux/adi-axi-common.h @@ -0,0 +1,56 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Analog Devices AXI common registers & definitions + * + * Copyright 2019 Analog Devices Inc. 
+ * + * https://wiki.analog.com/resources/fpga/docs/axi_ip + * https://wiki.analog.com/resources/fpga/docs/hdl/regmap + */ + +#ifndef ADI_AXI_COMMON_H_ +#define ADI_AXI_COMMON_H_ + +#define ADI_AXI_REG_VERSION 0x0000 +#define ADI_AXI_REG_FPGA_INFO 0x001C + +#define ADI_AXI_PCORE_VER(major, minor, patch) \ + (((major) << 16) | ((minor) << 8) | (patch)) + +#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff) +#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff) +#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff) + +#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff) +#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff) +#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff) + +enum adi_axi_fpga_technology { + ADI_AXI_FPGA_TECH_UNKNOWN = 0, + ADI_AXI_FPGA_TECH_SERIES7, + ADI_AXI_FPGA_TECH_ULTRASCALE, + ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS, +}; + +enum adi_axi_fpga_family { + ADI_AXI_FPGA_FAMILY_UNKNOWN = 0, + ADI_AXI_FPGA_FAMILY_ARTIX, + ADI_AXI_FPGA_FAMILY_KINTEX, + ADI_AXI_FPGA_FAMILY_VIRTEX, + ADI_AXI_FPGA_FAMILY_ZYNQ, +}; + +enum adi_axi_fpga_speed_grade { + ADI_AXI_FPGA_SPEED_UNKNOWN = 0, + ADI_AXI_FPGA_SPEED_1 = 10, + ADI_AXI_FPGA_SPEED_1L = 11, + ADI_AXI_FPGA_SPEED_1H = 12, + ADI_AXI_FPGA_SPEED_1HV = 13, + ADI_AXI_FPGA_SPEED_1LV = 14, + ADI_AXI_FPGA_SPEED_2 = 20, + ADI_AXI_FPGA_SPEED_2L = 21, + ADI_AXI_FPGA_SPEED_2LV = 22, + ADI_AXI_FPGA_SPEED_3 = 30, +}; + +#endif /* ADI_AXI_COMMON_H_ */ diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h index 8f7931eb7d16..9ef2633e2c08 100644 --- a/include/linux/alloc_tag.h +++ b/include/linux/alloc_tag.h @@ -88,7 +88,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct) return container_of(ct, struct alloc_tag, ct); } -#ifdef ARCH_NEEDS_WEAK_PER_CPU +#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE) /* * When percpu variables are required to be defined as weak, static percpu * variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION). @@ -102,7 +102,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); .ct = CODE_TAG_INIT, \ .counters = &_shared_alloc_tag }; -#else /* ARCH_NEEDS_WEAK_PER_CPU */ +#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */ #ifdef MODULE @@ -123,7 +123,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag); #endif /* MODULE */ -#endif /* ARCH_NEEDS_WEAK_PER_CPU */ +#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */ DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT, mem_alloc_profiling_key); diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 5ca2d5699620..7cfe48769239 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h @@ -4,12 +4,13 @@ * * Common interface definitions for making balloon pages movable by compaction. * - * Balloon page migration makes use of the general non-lru movable page + * Balloon page migration makes use of the general "movable_ops page migration" * feature. * * page->private is used to reference the responsible balloon device. - * page->mapping is used in context of non-lru page migration to reference - * the address space operations for page isolation/migration/compaction. + * That these pages have movable_ops, and which movable_ops apply, + * is derived from the page type (PageOffline()) combined with the + * PG_movable_ops flag (PageMovableOps()). 
* * As the page isolation scanning step a compaction thread does is a lockless * procedure (from a page standpoint), it might bring some racy situations while @@ -17,12 +18,10 @@ * and safely perform balloon's page compaction and migration we must, always, * ensure following these simple rules: * - * i. when updating a balloon's page ->mapping element, strictly do it under - * the following lock order, independently of the far superior - * locking scheme (lru_lock, balloon_lock): + * i. Setting the PG_movable_ops flag and page->private with the following + * lock order * +-page_lock(page); * +--spin_lock_irq(&b_dev_info->pages_lock); - * ... page->mapping updates here ... * * ii. isolation or dequeueing procedure must remove the page from balloon * device page list under b_dev_info->pages_lock. @@ -78,6 +77,15 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon) #ifdef CONFIG_BALLOON_COMPACTION extern const struct movable_operations balloon_mops; +/* + * balloon_page_device - get the b_dev_info descriptor for the balloon device + * that enqueues the given page. + */ +static inline struct balloon_dev_info *balloon_page_device(struct page *page) +{ + return (struct balloon_dev_info *)page_private(page); +} +#endif /* CONFIG_BALLOON_COMPACTION */ /* * balloon_page_insert - insert a page into the balloon's page list and make @@ -92,68 +100,34 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, struct page *page) { __SetPageOffline(page); - __SetPageMovable(page, &balloon_mops); - set_page_private(page, (unsigned long)balloon); + if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) { + SetPageMovableOps(page); + set_page_private(page, (unsigned long)balloon); + } list_add(&page->lru, &balloon->pages); } -/* - * balloon_page_delete - delete a page from balloon's page list and clear - * the page->private assignement accordingly. - * @page : page to be released from balloon's page list - * - * Caller must ensure the page is locked and the spin_lock protecting balloon - * pages list is held before deleting a page from the balloon device. - */ -static inline void balloon_page_delete(struct page *page) +static inline gfp_t balloon_mapping_gfp_mask(void) { - __ClearPageOffline(page); - __ClearPageMovable(page); - set_page_private(page, 0); - /* - * No touch page.lru field once @page has been isolated - * because VM is using the field. - */ - if (!PageIsolated(page)) - list_del(&page->lru); + if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) + return GFP_HIGHUSER_MOVABLE; + return GFP_HIGHUSER; } /* - * balloon_page_device - get the b_dev_info descriptor for the balloon device - * that enqueues the given page. + * balloon_page_finalize - prepare a balloon page that was removed from the + * balloon list for release to the page allocator + * @page: page to be released to the page allocator + * + * Caller must ensure that the page is locked. */ -static inline struct balloon_dev_info *balloon_page_device(struct page *page) +static inline void balloon_page_finalize(struct page *page) { - return (struct balloon_dev_info *)page_private(page); + if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) + set_page_private(page, 0); + /* PageOffline is sticky until the page is freed to the buddy. 
*/ } -static inline gfp_t balloon_mapping_gfp_mask(void) -{ - return GFP_HIGHUSER_MOVABLE; -} - -#else /* !CONFIG_BALLOON_COMPACTION */ - -static inline void balloon_page_insert(struct balloon_dev_info *balloon, - struct page *page) -{ - __SetPageOffline(page); - list_add(&page->lru, &balloon->pages); -} - -static inline void balloon_page_delete(struct page *page) -{ - __ClearPageOffline(page); - list_del(&page->lru); -} - -static inline gfp_t balloon_mapping_gfp_mask(void) -{ - return GFP_HIGHUSER; -} - -#endif /* CONFIG_BALLOON_COMPACTION */ - /* * balloon_page_push - insert a page into a page list. * @head : pointer to list diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h index 7615f8d7b1ed..e4b6ce953ddb 100644 --- a/include/linux/bcm47xx_nvram.h +++ b/include/linux/bcm47xx_nvram.h @@ -7,7 +7,6 @@ #include <linux/errno.h> #include <linux/types.h> -#include <linux/kernel.h> #include <linux/vmalloc.h> #ifdef CONFIG_BCM47XX_NVRAM diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h index f8254fd53e15..40a7da3ef50e 100644 --- a/include/linux/bcm47xx_sprom.h +++ b/include/linux/bcm47xx_sprom.h @@ -5,8 +5,8 @@ #ifndef __BCM47XX_SPROM_H #define __BCM47XX_SPROM_H +#include <linux/errno.h> #include <linux/types.h> -#include <linux/kernel.h> #include <linux/vmalloc.h> struct ssb_sprom; diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 6d9a53db54b6..5355f8f806a9 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h @@ -189,14 +189,14 @@ static __always_inline u64 field_mask(u64 field) } #define field_max(field) ((typeof(field))field_mask(field)) #define ____MAKE_OP(type,base,to,from) \ -static __always_inline __##type type##_encode_bits(base v, base field) \ +static __always_inline __##type __must_check type##_encode_bits(base v, base field) \ { \ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \ __field_overflow(); \ return to((v & field_mask(field)) * field_multiplier(field)); \ } \ -static __always_inline __##type type##_replace_bits(__##type old, \ - base val, base field) \ +static __always_inline __##type __must_check type##_replace_bits(__##type old, \ + base val, base field) \ { \ return (old & ~to(field)) | type##_encode_bits(val, field); \ } \ @@ -205,7 +205,7 @@ static __always_inline void type##p_replace_bits(__##type *p, \ { \ *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ } \ -static __always_inline base type##_get_bits(__##type v, base field) \ +static __always_inline base __must_check type##_get_bits(__##type v, base field) \ { \ return (from(v) & field)/field_multiplier(field); \ } diff --git a/include/linux/bits.h b/include/linux/bits.h index 7ad056219115..a40cc861b3a7 100644 --- a/include/linux/bits.h +++ b/include/linux/bits.h @@ -2,10 +2,8 @@ #ifndef __LINUX_BITS_H #define __LINUX_BITS_H -#include <linux/const.h> #include <vdso/bits.h> #include <uapi/linux/bits.h> -#include <asm/bitsperlong.h> #define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG)) #define BIT_WORD(nr) ((nr) / BITS_PER_LONG) @@ -50,10 +48,14 @@ (type_max(t) << (l) & \ type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h))))) +#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l) +#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l) + #define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l) #define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l) #define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l) #define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l) +#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l) /* * Fixed-type variants 
of BIT(), with additional checks like GENMASK_TYPE(). The @@ -79,28 +81,9 @@ * BUILD_BUG_ON_ZERO is not available in h files included from asm files, * disable the input check if that is the case. */ -#define GENMASK_INPUT_CHECK(h, l) 0 +#define GENMASK(h, l) __GENMASK(h, l) +#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l) #endif /* !defined(__ASSEMBLY__) */ -#define GENMASK(h, l) \ - (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l)) -#define GENMASK_ULL(h, l) \ - (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l)) - -#if !defined(__ASSEMBLY__) -/* - * Missing asm support - * - * __GENMASK_U128() depends on _BIT128() which would not work - * in the asm code, as it shifts an 'unsigned __int128' data - * type instead of direct representation of 128 bit constants - * such as long and unsigned long. The fundamental problem is - * that a 128 bit constant will get silently truncated by the - * gcc compiler. - */ -#define GENMASK_U128(h, l) \ - (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l)) -#endif - #endif /* __LINUX_BITS_H */ diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 082ccd8ad96b..aedf573bdb42 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h @@ -77,9 +77,6 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type) extern struct static_key_false cgroup_bpf_enabled_key[MAX_CGROUP_BPF_ATTACH_TYPE]; #define cgroup_bpf_enabled(atype) static_branch_unlikely(&cgroup_bpf_enabled_key[atype]) -#define for_each_cgroup_storage_type(stype) \ - for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) - struct bpf_cgroup_storage_map; struct bpf_storage_buffer { @@ -510,8 +507,6 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map, #define BPF_CGROUP_RUN_PROG_SETSOCKOPT(sock, level, optname, optval, optlen, \ kernel_optval) ({ 0; }) -#define for_each_cgroup_storage_type(stype) for (; false; ) - #endif /* CONFIG_CGROUP_BPF */ #endif /* _BPF_CGROUP_H */ diff --git a/include/linux/bpf.h b/include/linux/bpf.h index f9cd2164ed23..cc700925b802 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h @@ -208,6 +208,20 @@ enum btf_field_type { BPF_RES_SPIN_LOCK = (1 << 12), }; +enum bpf_cgroup_storage_type { + BPF_CGROUP_STORAGE_SHARED, + BPF_CGROUP_STORAGE_PERCPU, + __BPF_CGROUP_STORAGE_MAX +#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX +}; + +#ifdef CONFIG_CGROUP_BPF +# define for_each_cgroup_storage_type(stype) \ + for (stype = 0; stype < MAX_BPF_CGROUP_STORAGE_TYPE; stype++) +#else +# define for_each_cgroup_storage_type(stype) for (; false; ) +#endif /* CONFIG_CGROUP_BPF */ + typedef void (*btf_dtor_kfunc_t)(void *); struct btf_field_kptr { @@ -260,6 +274,19 @@ struct bpf_list_node_kern { void *owner; } __attribute__((aligned(8))); +/* 'Ownership' of program-containing map is claimed by the first program + * that is going to use this map or by the first program which FD is + * stored in the map to make sure that all callers and callees have the + * same prog type, JITed flag and xdp_has_frags flag. 
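A hedged sketch of how the now out-of-line owner record might be claimed and checked under the new owner_lock. Only bpf_map_owner_alloc(), map->owner and map->owner_lock come from this diff; the helper name and the exact set of compared fields are assumptions:

static bool map_owner_compatible(struct bpf_map *map,
				 const struct bpf_prog *prog)
{
	bool ret = false;

	spin_lock(&map->owner_lock);
	if (!map->owner) {
		/* First user claims ownership; GFP_ATOMIC as a spinlock is held. */
		map->owner = bpf_map_owner_alloc(map);
		if (map->owner) {
			map->owner->type = prog->type;
			map->owner->jited = prog->jited;
			ret = true;
		}
	} else {
		ret = map->owner->type == prog->type &&
		      map->owner->jited == prog->jited;
	}
	spin_unlock(&map->owner_lock);
	return ret;
}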
+ */ +struct bpf_map_owner { + enum bpf_prog_type type; + bool jited; + bool xdp_has_frags; + u64 storage_cookie[MAX_BPF_CGROUP_STORAGE_TYPE]; + const struct btf_type *attach_func_proto; +}; + struct bpf_map { const struct bpf_map_ops *ops; struct bpf_map *inner_map_meta; @@ -292,24 +319,15 @@ struct bpf_map { struct rcu_head rcu; }; atomic64_t writecnt; - /* 'Ownership' of program-containing map is claimed by the first program - * that is going to use this map or by the first program which FD is - * stored in the map to make sure that all callers and callees have the - * same prog type, JITed flag and xdp_has_frags flag. - */ - struct { - const struct btf_type *attach_func_proto; - spinlock_t lock; - enum bpf_prog_type type; - bool jited; - bool xdp_has_frags; - } owner; + spinlock_t owner_lock; + struct bpf_map_owner *owner; bool bypass_spec_v1; bool frozen; /* write-once; write-protected by freeze_mutex */ bool free_after_mult_rcu_gp; bool free_after_rcu_gp; atomic64_t sleepable_refcnt; s64 __percpu *elem_count; + u64 cookie; /* write-once */ }; static inline const char *btf_field_type_name(enum btf_field_type type) @@ -1082,14 +1100,6 @@ struct bpf_prog_offload { u32 jited_len; }; -enum bpf_cgroup_storage_type { - BPF_CGROUP_STORAGE_SHARED, - BPF_CGROUP_STORAGE_PERCPU, - __BPF_CGROUP_STORAGE_MAX -}; - -#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX - /* The longest tracepoint has 12 args. * See include/trace/bpf_probe.h */ @@ -2108,6 +2118,16 @@ static inline bool bpf_map_flags_access_ok(u32 access_flags) (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); } +static inline struct bpf_map_owner *bpf_map_owner_alloc(struct bpf_map *map) +{ + return kzalloc(sizeof(*map->owner), GFP_ATOMIC); +} + +static inline void bpf_map_owner_free(struct bpf_map *map) +{ + kfree(map->owner); +} + struct bpf_event_entry { struct perf_event *event; struct file *perf_file; diff --git a/include/linux/cfi.h b/include/linux/cfi.h index 1db17ecbb86c..52a98886a455 100644 --- a/include/linux/cfi.h +++ b/include/linux/cfi.h @@ -11,16 +11,9 @@ #include <linux/module.h> #include <asm/cfi.h> +#ifdef CONFIG_CFI_CLANG extern bool cfi_warn; -#ifndef cfi_get_offset -static inline int cfi_get_offset(void) -{ - return 0; -} -#endif - -#ifdef CONFIG_CFI_CLANG enum bug_trap_type report_cfi_failure(struct pt_regs *regs, unsigned long addr, unsigned long *target, u32 type); @@ -29,6 +22,44 @@ static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs, { return report_cfi_failure(regs, addr, NULL, 0); } + +#ifndef cfi_get_offset +/* + * Returns the CFI prefix offset. By default, the compiler emits only + * a 4-byte CFI type hash before the function. If an architecture + * uses -fpatchable-function-entry=N,M where M>0 to change the prefix + * offset, they must override this function. 
+ */ +static inline int cfi_get_offset(void) +{ + return 4; +} +#endif + +#ifndef cfi_get_func_hash +static inline u32 cfi_get_func_hash(void *func) +{ + u32 hash; + + if (get_kernel_nofault(hash, func - cfi_get_offset())) + return 0; + + return hash; +} +#endif + +/* CFI type hashes for BPF function types */ +extern u32 cfi_bpf_hash; +extern u32 cfi_bpf_subprog_hash; + +#else /* CONFIG_CFI_CLANG */ + +static inline int cfi_get_offset(void) { return 0; } +static inline u32 cfi_get_func_hash(void *func) { return 0; } + +#define cfi_bpf_hash 0U +#define cfi_bpf_subprog_hash 0U + #endif /* CONFIG_CFI_CLANG */ #ifdef CONFIG_ARCH_USES_CFI_TRAPS diff --git a/include/linux/cfi_types.h b/include/linux/cfi_types.h index 6b8713675765..685f7181780f 100644 --- a/include/linux/cfi_types.h +++ b/include/linux/cfi_types.h @@ -41,5 +41,28 @@ SYM_TYPED_START(name, SYM_L_GLOBAL, SYM_A_ALIGN) #endif +#else /* __ASSEMBLY__ */ + +#ifdef CONFIG_CFI_CLANG +#define DEFINE_CFI_TYPE(name, func) \ + /* \ + * Force a reference to the function so the compiler generates \ + * __kcfi_typeid_<func>. \ + */ \ + __ADDRESSABLE(func); \ + /* u32 name __ro_after_init = __kcfi_typeid_<func> */ \ + extern u32 name; \ + asm ( \ + " .pushsection .data..ro_after_init,\"aw\",\%progbits \n" \ + " .type " #name ",\%object \n" \ + " .globl " #name " \n" \ + " .p2align 2, 0x0 \n" \ + #name ": \n" \ + " .4byte __kcfi_typeid_" #func " \n" \ + " .size " #name ", 4 \n" \ + " .popsection \n" \ + ); +#endif + #endif /* __ASSEMBLY__ */ #endif /* _LINUX_CFI_TYPES_H */ diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index e61687d5e496..6b93a64115fe 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h @@ -375,15 +375,12 @@ struct css_rstat_cpu { * Child cgroups with stat updates on this cpu since the last read * are linked on the parent's ->updated_children through * ->updated_next. updated_children is terminated by its container css. - * - * In addition to being more compact, singly-linked list pointing to - * the css makes it unnecessary for each per-cpu struct to point back - * to the associated css. - * - * Protected by per-cpu css->ss->rstat_ss_cpu_lock. 
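The DEFINE_CFI_TYPE() asm block in cfi_types.h above emits a u32 object initialized to a function's KCFI type id. A hedged example of how an architecture might instantiate one of the BPF hashes declared in cfi.h; the __bpf_prog_runX prototype is an assumption standing in for the arch's real dispatch signature:

asmlinkage u64 __bpf_prog_runX(const void *ctx, const struct bpf_insn *insn);
/* expands to: u32 cfi_bpf_hash __ro_after_init = __kcfi_typeid___bpf_prog_runX */
DEFINE_CFI_TYPE(cfi_bpf_hash, __bpf_prog_runX)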
*/ struct cgroup_subsys_state *updated_children; struct cgroup_subsys_state *updated_next; /* NULL if not on the list */ + + struct llist_node lnode; /* lockless list for update */ + struct cgroup_subsys_state *owner; /* back pointer */ }; /* @@ -821,7 +818,7 @@ struct cgroup_subsys { unsigned int depends_on; spinlock_t rstat_ss_lock; - raw_spinlock_t __percpu *rstat_ss_cpu_lock; + struct llist_head __percpu *lhead; /* lockless update list head */ }; extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem; @@ -898,14 +895,12 @@ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd) #endif } +#ifdef CONFIG_CGROUP_NET_CLASSID static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd) { -#ifdef CONFIG_CGROUP_NET_CLASSID return READ_ONCE(skcd->classid); -#else - return 0; -#endif } +#endif static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, u16 prioidx) @@ -915,13 +910,13 @@ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd, #endif } +#ifdef CONFIG_CGROUP_NET_CLASSID static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd, u32 classid) { -#ifdef CONFIG_CGROUP_NET_CLASSID WRITE_ONCE(skcd->classid, classid); -#endif } +#endif #else /* CONFIG_SOCK_CGROUP_DATA */ diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h index bee606bebaca..2573585b7f06 100644 --- a/include/linux/cleanup.h +++ b/include/linux/cleanup.h @@ -3,6 +3,8 @@ #define _LINUX_CLEANUP_H #include <linux/compiler.h> +#include <linux/err.h> +#include <linux/args.h> /** * DOC: scope-based cleanup helpers @@ -61,9 +63,20 @@ * Observe the lock is held for the remainder of the "if ()" block not * the remainder of "func()". * - * Now, when a function uses both __free() and guard(), or multiple - * instances of __free(), the LIFO order of variable definition order - * matters. GCC documentation says: + * The ACQUIRE() macro can be used in all places that guard() can be + * used and additionally support conditional locks:: + * + * DEFINE_GUARD_COND(pci_dev, _try, pci_dev_trylock(_T)) + * ... + * ACQUIRE(pci_dev_try, lock)(dev); + * rc = ACQUIRE_ERR(pci_dev_try, &lock); + * if (rc) + * return rc; + * // @lock is held + * + * Now, when a function uses both __free() and guard()/ACQUIRE(), or + * multiple instances of __free(), the LIFO order of variable definition + * order matters. GCC documentation says: * * "When multiple variables in the same scope have cleanup attributes, * at exit from the scope their associated cleanup functions are run in @@ -313,14 +326,46 @@ _label: \ * acquire fails. * * Only for conditional locks. + * + * ACQUIRE(name, var): + * a named instance of the (guard) class, suitable for conditional + * locks when paired with ACQUIRE_ERR(). + * + * ACQUIRE_ERR(name, &var): + * a helper that is effectively a PTR_ERR() conversion of the guard + * pointer. Returns 0 when the lock was acquired and a negative + * error code otherwise. 
*/ #define __DEFINE_CLASS_IS_CONDITIONAL(_name, _is_cond) \ static __maybe_unused const bool class_##_name##_is_conditional = _is_cond -#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \ - static inline void * class_##_name##_lock_ptr(class_##_name##_t *_T) \ - { return (void *)(__force unsigned long)*(_exp); } +#define __GUARD_IS_ERR(_ptr) \ + ({ \ + unsigned long _rc = (__force unsigned long)(_ptr); \ + unlikely((_rc - 1) >= -MAX_ERRNO - 1); \ + }) + +#define __DEFINE_GUARD_LOCK_PTR(_name, _exp) \ + static inline void *class_##_name##_lock_ptr(class_##_name##_t *_T) \ + { \ + void *_ptr = (void *)(__force unsigned long)*(_exp); \ + if (IS_ERR(_ptr)) { \ + _ptr = NULL; \ + } \ + return _ptr; \ + } \ + static inline int class_##_name##_lock_err(class_##_name##_t *_T) \ + { \ + long _rc = (__force unsigned long)*(_exp); \ + if (!_rc) { \ + _rc = -EBUSY; \ + } \ + if (!IS_ERR_VALUE(_rc)) { \ + _rc = 0; \ + } \ + return _rc; \ + } #define DEFINE_CLASS_IS_GUARD(_name) \ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \ @@ -331,23 +376,37 @@ static __maybe_unused const bool class_##_name##_is_conditional = _is_cond __DEFINE_GUARD_LOCK_PTR(_name, _T) #define DEFINE_GUARD(_name, _type, _lock, _unlock) \ - DEFINE_CLASS(_name, _type, if (_T) { _unlock; }, ({ _lock; _T; }), _type _T); \ + DEFINE_CLASS(_name, _type, if (!__GUARD_IS_ERR(_T)) { _unlock; }, ({ _lock; _T; }), _type _T); \ DEFINE_CLASS_IS_GUARD(_name) -#define DEFINE_GUARD_COND(_name, _ext, _condlock) \ +#define DEFINE_GUARD_COND_4(_name, _ext, _lock, _cond) \ __DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \ EXTEND_CLASS(_name, _ext, \ - ({ void *_t = _T; if (_T && !(_condlock)) _t = NULL; _t; }), \ + ({ void *_t = _T; int _RET = (_lock); if (_T && !(_cond)) _t = ERR_PTR(_RET); _t; }), \ class_##_name##_t _T) \ static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \ - { return class_##_name##_lock_ptr(_T); } + { return class_##_name##_lock_ptr(_T); } \ + static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \ + { return class_##_name##_lock_err(_T); } + +/* + * Default binary condition; success on 'true'. + */ +#define DEFINE_GUARD_COND_3(_name, _ext, _lock) \ + DEFINE_GUARD_COND_4(_name, _ext, _lock, _RET) + +#define DEFINE_GUARD_COND(X...) CONCATENATE(DEFINE_GUARD_COND_, COUNT_ARGS(X))(X) #define guard(_name) \ CLASS(_name, __UNIQUE_ID(guard)) #define __guard_ptr(_name) class_##_name##_lock_ptr +#define __guard_err(_name) class_##_name##_lock_err #define __is_cond_ptr(_name) class_##_name##_is_conditional +#define ACQUIRE(_name, _var) CLASS(_name, _var) +#define ACQUIRE_ERR(_name, _var) __guard_err(_name)(_var) + /* * Helper macro for scoped_guard(). 
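A hedged illustration of the new explicit-condition form above (the rwsem pairing is an assumption, not taken from this diff): the lock expression's return value is captured in _RET, tested against the condition, and preserved as ERR_PTR(_RET) on failure, so ACQUIRE_ERR() reports the real errno rather than a generic -EBUSY:

DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T))
/* success iff down_read_interruptible() returned 0; -EINTR propagates */
DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0)

static int foo_read_locked(struct rw_semaphore *sem)
{
	ACQUIRE(rwsem_read_intr, lock)(sem);
	int ret = ACQUIRE_ERR(rwsem_read_intr, &lock);

	if (ret)
		return ret;	/* e.g. -EINTR, not a bare -EBUSY */
	/* read side held here; released on return */
	return 0;
}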
 *
@@ -409,7 +468,7 @@ typedef struct { \
 \
 static inline void class_##_name##_destructor(class_##_name##_t *_T) \
 { \
-	if (_T->lock) { _unlock; } \
+	if (!__GUARD_IS_ERR(_T->lock)) { _unlock; } \
 } \
 \
 __DEFINE_GUARD_LOCK_PTR(_name, &_T->lock)
@@ -441,15 +500,22 @@ __DEFINE_CLASS_IS_CONDITIONAL(_name, false); \
 __DEFINE_UNLOCK_GUARD(_name, void, _unlock, __VA_ARGS__) \
 __DEFINE_LOCK_GUARD_0(_name, _lock)
-#define DEFINE_LOCK_GUARD_1_COND(_name, _ext, _condlock) \
+#define DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _cond) \
 	__DEFINE_CLASS_IS_CONDITIONAL(_name##_ext, true); \
 	EXTEND_CLASS(_name, _ext, \
 		     ({ class_##_name##_t _t = { .lock = l }, *_T = &_t;\
-			if (_T->lock && !(_condlock)) _T->lock = NULL; \
+			int _RET = (_lock); \
+			if (_T->lock && !(_cond)) _T->lock = ERR_PTR(_RET);\
 			_t; }), \
 		     typeof_member(class_##_name##_t, lock) l) \
 	static inline void * class_##_name##_ext##_lock_ptr(class_##_name##_t *_T) \
-	{ return class_##_name##_lock_ptr(_T); }
+	{ return class_##_name##_lock_ptr(_T); } \
+	static inline int class_##_name##_ext##_lock_err(class_##_name##_t *_T) \
+	{ return class_##_name##_lock_err(_T); }
+
+#define DEFINE_LOCK_GUARD_1_COND_3(_name, _ext, _lock) \
+	DEFINE_LOCK_GUARD_1_COND_4(_name, _ext, _lock, _RET)
+#define DEFINE_LOCK_GUARD_1_COND(X...) CONCATENATE(DEFINE_LOCK_GUARD_1_COND_, COUNT_ARGS(X))(X)
 #endif /* _LINUX_CLEANUP_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2e6e603b7493..630705a47129 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1360,6 +1360,32 @@ void clk_hw_unregister(struct clk_hw *hw);
 /* helper functions */
 const char *__clk_get_name(const struct clk *clk);
 const char *clk_hw_get_name(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_dev() - get device from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device from
+ *
+ * This is a helper to get the struct device associated with a hardware
+ * clock. Some clock controllers, such as the one registered with
+ * CLK_OF_DECLARE(), may not have provided a device pointer while
+ * registering the clock.
+ *
+ * Return: the struct device associated with the clock, or NULL if there
+ * is none.
+ */
+struct device *clk_hw_get_dev(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_of_node() - get device_node from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device_node from
+ *
+ * This is a helper to get the struct device_node associated with a
+ * hardware clock.
+ *
+ * Return: the struct device_node associated with the clock, or NULL
+ * if there is none.
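A hedged usage sketch for the two new clk_hw helpers; the provider callback shape, the property name, and the error policy are assumptions:

static int foo_clk_init(struct clk_hw *hw)
{
	struct device *dev = clk_hw_get_dev(hw);
	struct device_node *np = clk_hw_get_of_node(hw);
	u32 div = 1;

	/*
	 * clk_hw_get_dev() may return NULL, e.g. for clocks registered via
	 * CLK_OF_DECLARE(); clk_hw_get_of_node() may return NULL for non-DT
	 * providers, so check both.
	 */
	if (np)
		of_property_read_u32(np, "foo,divider", &div);
	if (dev)
		dev_dbg(dev, "divider %u\n", div);
	return div ? 0 : -EINVAL;
}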
+ */ +struct device_node *clk_hw_get_of_node(const struct clk_hw *hw); #ifdef CONFIG_COMMON_CLK struct clk_hw *__clk_get_hw(struct clk *clk); #else diff --git a/include/linux/codetag.h b/include/linux/codetag.h index 5f2b9a1f722c..457ed8fd3214 100644 --- a/include/linux/codetag.h +++ b/include/linux/codetag.h @@ -54,6 +54,7 @@ struct codetag_iterator { struct codetag_module *cmod; unsigned long mod_id; struct codetag *ct; + unsigned long mod_seq; }; #ifdef MODULE diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 32048052c64a..5d07c469b571 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h @@ -127,6 +127,8 @@ #define __diag_GCC_8(s) #endif +#define __diag_GCC_all(s) __diag(s) + #define __diag_ignore_all(option, comment) \ __diag(__diag_GCC_ignore option) diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 96e8a66da133..68861da4cf7c 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h @@ -10,7 +10,7 @@ #ifdef CONFIG_COREDUMP struct core_vma_metadata { unsigned long start, end; - unsigned long flags; + vm_flags_t flags; unsigned long dump_size; unsigned long pgoff; struct file *file; diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index d381420bbd5f..edfa61d80702 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h @@ -90,7 +90,6 @@ enum cpuhp_state { CPUHP_RADIX_DEAD, CPUHP_PAGE_ALLOC, CPUHP_NET_DEV_DEAD, - CPUHP_PCI_XGENE_DEAD, CPUHP_IOMMU_IOVA_DEAD, CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, CPUHP_PADATA_DEAD, diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 7ae80a7ca81e..ff8f41ab7ce6 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h @@ -355,6 +355,18 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *src) } /** + * cpumask_random - get random cpu in *src. + * @src: cpumask pointer + * + * Return: random set bit, or >= nr_cpu_ids if @src is empty. + */ +static __always_inline +unsigned int cpumask_random(const struct cpumask *src) +{ + return find_random_bit(cpumask_bits(src), nr_cpu_ids); +} + +/** * for_each_cpu - iterate over every cpu in a mask * @cpu: the (optionally unsigned) integer iterator * @mask: the cpumask pointer @@ -547,22 +559,6 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1, } /** - * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd. - * @srcp1: the cpumask pointer - * @srcp2: the cpumask pointer - * @cpu: the Nth cpu to find, starting from 0 - * - * Return: >= nr_cpu_ids if such cpu doesn't exist. - */ -static __always_inline -unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1, - const struct cpumask *srcp2) -{ - return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2), - small_cpumask_bits, cpumask_check(cpu)); -} - -/** * cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd. 
* @srcp1: the cpumask pointer * @srcp2: the cpumask pointer @@ -609,6 +605,18 @@ void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp) __set_bit(cpumask_check(cpu), cpumask_bits(dstp)); } +/** + * cpumask_clear_cpus - clear cpus in a cpumask + * @dstp: the cpumask pointer + * @cpu: cpu number (< nr_cpu_ids) + * @ncpus: number of cpus to clear (< nr_cpu_ids) + */ +static __always_inline void cpumask_clear_cpus(struct cpumask *dstp, + unsigned int cpu, unsigned int ncpus) +{ + cpumask_check(cpu + ncpus - 1); + bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus); +} /** * cpumask_clear_cpu - clear a cpu in a cpumask diff --git a/include/linux/crash_reserve.h b/include/linux/crash_reserve.h index 1fe7e7d1b214..7b44b41d0a20 100644 --- a/include/linux/crash_reserve.h +++ b/include/linux/crash_reserve.h @@ -13,10 +13,23 @@ */ extern struct resource crashk_res; extern struct resource crashk_low_res; +extern struct range crashk_cma_ranges[]; +#if defined(CONFIG_CMA) && defined(CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION) +#define CRASHKERNEL_CMA +#define CRASHKERNEL_CMA_RANGES_MAX 4 +extern int crashk_cma_cnt; +#else +#define crashk_cma_cnt 0 +#define CRASHKERNEL_CMA_RANGES_MAX 0 +#endif + int __init parse_crashkernel(char *cmdline, unsigned long long system_ram, unsigned long long *crash_size, unsigned long long *crash_base, - unsigned long long *low_size, bool *high); + unsigned long long *low_size, unsigned long long *cma_size, + bool *high); + +void __init reserve_crashkernel_cma(unsigned long long cma_size); #ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION #ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE diff --git a/include/linux/crypto.h b/include/linux/crypto.h index b50f1954d1bb..a2137e19be7d 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h @@ -136,6 +136,9 @@ /* Set if the algorithm supports virtual addresses. */ #define CRYPTO_ALG_REQ_VIRT 0x00040000 +/* Set if the algorithm cannot have a fallback (e.g., phmac). */ +#define CRYPTO_ALG_NO_FALLBACK 0x00080000 + /* The high bits 0xff000000 are reserved for type-specific flags. */ /* diff --git a/include/linux/damon.h b/include/linux/damon.h index a4011726cb3b..f13664c62ddd 100644 --- a/include/linux/damon.h +++ b/include/linux/damon.h @@ -448,12 +448,29 @@ struct damos_access_pattern { }; /** + * struct damos_migrate_dests - Migration destination nodes and their weights. + * @node_id_arr: Array of migration destination node ids. + * @weight_arr: Array of migration weights for @node_id_arr. + * @nr_dests: Length of the @node_id_arr and @weight_arr arrays. + * + * @node_id_arr is an array of the ids of migration destination nodes. + * @weight_arr is an array of the weights for those. The weights in + * @weight_arr are for nodes in @node_id_arr of same array index. + */ +struct damos_migrate_dests { + unsigned int *node_id_arr; + unsigned int *weight_arr; + size_t nr_dests; +}; + +/** * struct damos - Represents a Data Access Monitoring-based Operation Scheme. * @pattern: Access pattern of target regions. - * @action: &damo_action to be applied to the target regions. + * @action: &damos_action to be applied to the target regions. * @apply_interval_us: The time between applying the @action. * @quota: Control the aggressiveness of this scheme. * @wmarks: Watermarks for automated (in)activation of this scheme. + * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}". * @target_nid: Destination node if @action is "migrate_{hot,cold}". * @filters: Additional set of &struct damos_filter for &action. 
* @ops_filters: ops layer handling &struct damos_filter objects list. @@ -472,9 +489,12 @@ struct damos_access_pattern { * monitoring context are inactive, DAMON stops monitoring either, and just * repeatedly checks the watermarks. * + * @migrate_dests specifies multiple migration target nodes with different + * weights for migrate_hot or migrate_cold actions. @target_nid is ignored if + * this is set. + * * @target_nid is used to set the migration target node for migrate_hot or - * migrate_cold actions, which means it's only meaningful when @action is either - * "migrate_hot" or "migrate_cold". + * migrate_cold actions, and @migrate_dests is unset. * * Before applying the &action to a memory region, &struct damon_operations * implementation could check pages of the region and skip &action to respect @@ -517,7 +537,10 @@ struct damos { struct damos_quota quota; struct damos_watermarks wmarks; union { - int target_nid; + struct { + int target_nid; + struct damos_migrate_dests migrate_dests; + }; }; struct list_head filters; struct list_head ops_filters; @@ -553,6 +576,7 @@ enum damon_ops_id { * @get_scheme_score: Get the score of a region for a scheme. * @apply_scheme: Apply a DAMON-based operation scheme. * @target_valid: Determine if the target is valid. + * @cleanup_target: Clean up each target before deallocation. * @cleanup: Clean up the context. * * DAMON can be extended for various address spaces and usages. For this, @@ -585,6 +609,7 @@ enum damon_ops_id { * filters (&struct damos_filter) that handled by itself. * @target_valid should check whether the target is still valid for the * monitoring. + * @cleanup_target is called before the target will be deallocated. * @cleanup is called from @kdamond just before its termination. */ struct damon_operations { @@ -600,42 +625,16 @@ struct damon_operations { struct damon_target *t, struct damon_region *r, struct damos *scheme, unsigned long *sz_filter_passed); bool (*target_valid)(struct damon_target *t); + void (*cleanup_target)(struct damon_target *t); void (*cleanup)(struct damon_ctx *context); }; -/** - * struct damon_callback - Monitoring events notification callbacks. - * - * @after_wmarks_check: Called after each schemes' watermarks check. - * @after_aggregation: Called after each aggregation. - * @before_terminate: Called before terminating the monitoring. - * - * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just - * before finishing the monitoring. - * - * The monitoring thread calls @after_wmarks_check after each DAMON-based - * operation schemes' watermarks check. If users need to make changes to the - * attributes of the monitoring context while it's deactivated due to the - * watermarks, this is the good place to do. - * - * The monitoring thread calls @after_aggregation for each of the aggregation - * intervals. Therefore, users can safely access the monitoring results - * without additional protection. For the reason, users are recommended to use - * these callback for the accesses to the results. - * - * If any callback returns non-zero, monitoring stops. - */ -struct damon_callback { - int (*after_wmarks_check)(struct damon_ctx *context); - int (*after_aggregation)(struct damon_ctx *context); - void (*before_terminate)(struct damon_ctx *context); -}; - /* * struct damon_call_control - Control damon_call(). * * @fn: Function to be called back. * @data: Data that will be passed to @fn. + * @repeat: Repeat invocations. * @return_code: Return code from @fn invocation. 
 *
 * Control damon_call(), which requests specific kdamond to invoke a given
@@ -644,19 +643,22 @@ struct damon_callback {
 struct damon_call_control {
 	int (*fn)(void *data);
 	void *data;
+	bool repeat;
 	int return_code;
/* private: internal use only */
 	/* informs if the kdamond finished handling of the request */
 	struct completion completion;
 	/* informs if the kdamond canceled @fn invocation */
 	bool canceled;
+	/* List head for siblings. */
+	struct list_head list;
 };
/**
 * struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
 *
 * @access_bp: Access events observation ratio to achieve in bp.
- * @aggrs: Number of aggregations to acheive @access_bp within.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
 * @min_sample_us: Minimum resulting sampling interval in microseconds.
 * @max_sample_us: Maximum resulting sampling interval in microseconds.
 *
@@ -697,7 +699,7 @@ struct damon_intervals_goal {
 * ``mmap()`` calls from the application, in case of virtual memory monitoring)
 * and applies the changes for each @ops_update_interval. All time intervals
 * are in micro-seconds. Please refer to &struct damon_operations and &struct
- * damon_callback for more detail.
+ * damon_call_control for more detail.
 */
struct damon_attrs {
	unsigned long sample_interval;
@@ -744,7 +746,6 @@ struct damon_attrs {
 * Accesses to other fields must be protected by themselves.
 *
 * @ops: Set of monitoring operations for given use cases.
- * @callback: Set of callbacks for monitoring events notifications.
 *
 * @adaptive_targets: Head of monitoring targets (&damon_target) list.
 * @schemes: Head of schemes (&damos) list.
@@ -775,8 +776,9 @@ struct damon_ctx {
	/* for scheme quotas prioritization */
	unsigned long *regions_score_histogram;
-	struct damon_call_control *call_control;
-	struct mutex call_control_lock;
+	/* lists of &struct damon_call_control */
+	struct list_head call_controls;
+	struct mutex call_controls_lock;
	struct damos_walk_control *walk_control;
	struct mutex walk_control_lock;
@@ -786,7 +788,6 @@ struct damon_ctx {
	struct mutex kdamond_lock;
	struct damon_operations ops;
-	struct damon_callback callback;
	struct list_head adaptive_targets;
	struct list_head schemes;
@@ -905,7 +906,7 @@ struct damon_target *damon_new_target(void);
 void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
 bool damon_targets_empty(struct damon_ctx *ctx);
 void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
 unsigned int damon_nr_regions(struct damon_target *t);
 struct damon_ctx *damon_new_ctx(void);
@@ -934,6 +935,7 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
 int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
 int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+bool damon_is_running(struct damon_ctx *ctx);
 int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
 int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 78891518291d..9d624f4d9df6 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -26,7 +26,7 @@ struct dax_operations {
	 * number of pages available for DAX at that pfn.
	 */
	long (*direct_access)(struct dax_device *, pgoff_t, long,
-			enum dax_access_mode, void **, pfn_t *);
+			enum dax_access_mode, void **, unsigned long *);
	/* zero_page_range: required operation.
Zero page range */ int (*zero_page_range)(struct dax_device *, pgoff_t, size_t); /* @@ -243,7 +243,7 @@ static inline void dax_break_layout_final(struct inode *inode) bool dax_alive(struct dax_device *dax_dev); void *dax_get_private(struct dax_device *dax_dev); long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages, - enum dax_access_mode mode, void **kaddr, pfn_t *pfn); + enum dax_access_mode mode, void **kaddr, unsigned long *pfn); size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, size_t bytes, struct iov_iter *i); size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr, @@ -257,9 +257,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size); ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, const struct iomap_ops *ops); vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order, - pfn_t *pfnp, int *errp, const struct iomap_ops *ops); + unsigned long *pfnp, int *errp, + const struct iomap_ops *ops); vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf, - unsigned int order, pfn_t pfn); + unsigned int order, unsigned long pfn); int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); void dax_delete_mapping_range(struct address_space *mapping, loff_t start, loff_t end); diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index cb95951547ab..84fdc3a6a19a 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h @@ -156,7 +156,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti); */ typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff, long nr_pages, enum dax_access_mode node, void **kaddr, - pfn_t *pfn); + unsigned long *pfn); typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff, size_t nr_pages); diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h index 06c4de602b2f..7d40b51933d1 100644 --- a/include/linux/dmapool.h +++ b/include/linux/dmapool.h @@ -60,6 +60,14 @@ static inline struct dma_pool *dma_pool_create(const char *name, NUMA_NO_NODE); } +/** + * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory. + * @pool: dma pool that will produce the block + * @mem_flags: GFP_* bitmask + * @handle: pointer to dma address of block + * + * Same as dma_pool_alloc(), but the returned memory is zeroed. 
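A hedged usage sketch for the newly documented helper; "dev", the sizes, and the descriptor handling are assumptions:

static int foo_desc_test(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t dma;
	void *desc;

	pool = dma_pool_create("foo-desc", dev, 64, 64, 0);
	if (!pool)
		return -ENOMEM;

	/* Zeroed on return; no explicit memset() of the descriptor needed. */
	desc = dma_pool_zalloc(pool, GFP_KERNEL, &dma);
	if (desc) {
		/* ... hand "dma" to the device, touch "desc" from the CPU ... */
		dma_pool_free(pool, desc, dma);
	}
	dma_pool_destroy(pool);
	return 0;
}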
+ */
 static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
				    dma_addr_t *handle)
 {
diff --git a/include/linux/efi.h b/include/linux/efi.h
index e3776d9cad07..a98cc39e7aaa 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -439,6 +439,7 @@ void efi_native_runtime_setup(void);
 /* OVMF protocol GUIDs */
 #define OVMF_SEV_MEMORY_ACCEPTANCE_PROTOCOL_GUID EFI_GUID(0xc5a010fe, 0x38a7, 0x4531, 0x8a, 0x4a, 0x05, 0x00, 0xd2, 0xfd, 0x16, 0x49)
+#define OVMF_MEMORY_LOG_TABLE_GUID EFI_GUID(0x95305139, 0xb20f, 0x4723, 0x84, 0x25, 0x62, 0x7c, 0x88, 0x8f, 0xf1, 0x21)
 typedef struct {
	efi_guid_t guid;
@@ -642,6 +643,7 @@ extern struct efi {
	unsigned long esrt; /* ESRT table */
	unsigned long tpm_log; /* TPM2 Event Log table */
	unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+	unsigned long ovmf_debug_log;
	unsigned long mokvar_table; /* MOK variable config table */
	unsigned long coco_secret; /* Confidential computing secret table */
	unsigned long unaccepted; /* Unaccepted memory table */
@@ -1344,6 +1346,8 @@ bool efi_config_table_is_usable(const efi_guid_t *guid, unsigned long table)
 umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n);
+int ovmf_log_probe(unsigned long ovmf_debug_log_table);
+
 /*
  * efivar ops event type
 */
diff --git a/include/linux/execmem.h b/include/linux/execmem.h
index 3be35680a54f..7de229134e30 100644
--- a/include/linux/execmem.h
+++ b/include/linux/execmem.h
@@ -60,27 +60,11 @@ enum execmem_range_flags {
 *   will trap
 * @ptr: pointer to memory to fill
 * @size: size of the range to fill
- * @writable: is the memory poited by @ptr is writable or ROX
 *
 * A hook for architectures to fill execmem ranges with invalid instructions.
 * Architectures that use EXECMEM_ROX_CACHE must implement this.
 */
-void execmem_fill_trapping_insns(void *ptr, size_t size, bool writable);
-
-/**
- * execmem_make_temp_rw - temporarily remap region with read-write
- * permissions
- * @ptr: address of the region to remap
- * @size: size of the region to remap
- *
- * Remaps a part of the cached large page in the ROX cache in the range
- * [@ptr, @ptr + @size) as writable and not executable. The caller must
- * have exclusive ownership of this range and ensure nothing will try to
- * execute code in this range.
- *
- * Return: 0 on success or negative error code on failure.
- */
-int execmem_make_temp_rw(void *ptr, size_t size);
+void execmem_fill_trapping_insns(void *ptr, size_t size);
 /**
  * execmem_restore_rox - restore read-only-execute permissions
@@ -95,7 +79,6 @@ int execmem_make_temp_rw(void *ptr, size_t size);
  */
 int execmem_restore_rox(void *ptr, size_t size);
 #else
-static inline int execmem_make_temp_rw(void *ptr, size_t size) { return 0; }
 static inline int execmem_restore_rox(void *ptr, size_t size) { return 0; }
 #endif
@@ -166,6 +149,28 @@ struct execmem_info *execmem_arch_setup(void);
 void *execmem_alloc(enum execmem_type type, size_t size);
 /**
+ * execmem_alloc_rw - allocate writable executable memory
+ * @type: type of the allocation
+ * @size: how many bytes of memory are required
+ *
+ * Allocates memory that will contain executable code, either generated or
+ * loaded from kernel modules.
+ *
+ * Allocates memory that will contain data coupled with executable code,
+ * like data sections in kernel modules.
+ *
+ * Forces writable permissions on the allocated memory and the caller is
+ * responsible to manage the permissions afterwards.
+ *
+ * For architectures that use ROX cache the permissions will be set to R+W.
+ * For architectures that don't use ROX cache the default permissions for @type + * will be used as they must be writable. + * + * Return: a pointer to the allocated memory or %NULL + */ +void *execmem_alloc_rw(enum execmem_type type, size_t size); + +/** * execmem_free - free executable memory * @ptr: pointer to the memory that should be freed */ @@ -186,19 +191,6 @@ struct vm_struct *execmem_vmap(size_t size); #endif /** - * execmem_update_copy - copy an update to executable memory - * @dst: destination address to update - * @src: source address containing the data - * @size: how many bytes of memory shold be copied - * - * Copy @size bytes from @src to @dst using text poking if the memory at - * @dst is read-only. - * - * Return: a pointer to @dst or NULL on error - */ -void *execmem_update_copy(void *dst, const void *src, size_t size); - -/** * execmem_is_rox - check if execmem is read-only * @type - the execmem type to check * diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 5206d63b3386..2f8b8bfc0e73 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h @@ -268,7 +268,7 @@ struct node_footer { /* Node IDs in an Indirect Block */ #define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32)) -#define ADDRS_PER_PAGE(page, inode) (addrs_per_page(inode, IS_INODE(page))) +#define ADDRS_PER_PAGE(folio, inode) (addrs_per_page(inode, IS_INODE(folio))) #define NODE_DIR1_BLOCK (DEF_ADDRS_PER_INODE + 1) #define NODE_DIR2_BLOCK (DEF_ADDRS_PER_INODE + 2) diff --git a/include/linux/find.h b/include/linux/find.h index 5a2c267ea7f9..9d720ad92bc1 100644 --- a/include/linux/find.h +++ b/include/linux/find.h @@ -44,6 +44,8 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned long size, unsigned long offset); #endif +unsigned long find_random_bit(const unsigned long *addr, unsigned long size); + #ifndef find_next_bit /** * find_next_bit - find the next set bit in a memory region @@ -268,33 +270,6 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long * } /** - * find_nth_andnot_bit - find N'th set bit in 2 memory regions, - * flipping bits in 2nd region - * @addr1: The 1st address to start the search at - * @addr2: The 2nd address to start the search at - * @size: The maximum number of bits to search - * @n: The number of set bit, which position is needed, counting from 0 - * - * Returns the bit number of the N'th set bit. - * If no such, returns @size. - */ -static __always_inline -unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2, - unsigned long size, unsigned long n) -{ - if (n >= size) - return size; - - if (small_const_nbits(size)) { - unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0); - - return val ? fns(val, n) : size; - } - - return __find_nth_andnot_bit(addr1, addr2, size, n); -} - -/** * find_nth_and_andnot_bit - find N'th set bit in 2 memory regions, * excluding those set in 3rd region * @addr1: The 1st address to start the search at diff --git a/include/linux/firewire.h b/include/linux/firewire.h index b632eec3ab52..cceb70415ed2 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -136,6 +136,7 @@ struct fw_card { __be32 maint_utility_register; struct workqueue_struct *isoc_wq; + struct workqueue_struct *async_wq; }; static inline struct fw_card *fw_card_get(struct fw_card *card) @@ -307,8 +308,7 @@ struct fw_packet { * For successful transmission, the status code is the ack received * from the destination. 
Otherwise it is one of the juju-specific * rcodes: RCODE_SEND_ERROR, _CANCELLED, _BUSY, _GENERATION, _NO_ACK. - * The callback can be called from tasklet context and thus - * must never block. + * The callback can be called from workqueue and thus must never block. */ fw_packet_callback_t callback; int ack; @@ -381,6 +381,10 @@ void __fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode * * A variation of __fw_send_request() to generate callback for response subaction without time * stamp. + * + * The callback is invoked in the workqueue context in most cases. However, if an error is detected + * before queueing or the destination address refers to the local node, it is invoked in the + * current context instead. */ static inline void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, @@ -410,6 +414,10 @@ static inline void fw_send_request(struct fw_card *card, struct fw_transaction * * @callback_data: data to be passed to the transaction completion callback * * A variation of __fw_send_request() to generate callback for response subaction with time stamp. + * + * The callback is invoked in the workqueue context in most cases. However, if an error is detected + * before queueing or the destination address refers to the local node, it is invoked in the current + * context instead. */ static inline void fw_send_request_with_tstamp(struct fw_card *card, struct fw_transaction *t, int tcode, int destination_id, int generation, int speed, unsigned long long offset, diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h deleted file mode 100644 index 141ac3f251e6..000000000000 --- a/include/linux/fpga/adi-axi-common.h +++ /dev/null @@ -1,23 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Analog Devices AXI common registers & definitions - * - * Copyright 2019 Analog Devices Inc. - * - * https://wiki.analog.com/resources/fpga/docs/axi_ip - * https://wiki.analog.com/resources/fpga/docs/hdl/regmap - */ - -#ifndef ADI_AXI_COMMON_H_ -#define ADI_AXI_COMMON_H_ - -#define ADI_AXI_REG_VERSION 0x0000 - -#define ADI_AXI_PCORE_VER(major, minor, patch) \ - (((major) << 16) | ((minor) << 8) | (patch)) - -#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff) -#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff) -#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff) - -#endif /* ADI_AXI_COMMON_H_ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index 2ec4807d4ea8..d7ab4f96d705 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -200,12 +200,12 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* * The two FMODE_NONOTIFY* define which fsnotify events should not be generated - * for a file. These are the possible values of (f->f_mode & - * FMODE_FSNOTIFY_MASK) and their meaning: + * for an open file. These are the possible values of + * (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning: * * FMODE_NONOTIFY - suppress all (incl. non-permission) events. * FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events. - * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only pre-content events. + * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM. 
*/ #define FMODE_FSNOTIFY_MASK \ (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM) @@ -213,13 +213,13 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_FSNOTIFY_NONE(mode) \ ((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY) #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -#define FMODE_FSNOTIFY_PERM(mode) \ +#define FMODE_FSNOTIFY_HSM(mode) \ ((mode & FMODE_FSNOTIFY_MASK) == 0 || \ (mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)) -#define FMODE_FSNOTIFY_HSM(mode) \ +#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \ ((mode & FMODE_FSNOTIFY_MASK) == 0) #else -#define FMODE_FSNOTIFY_PERM(mode) 0 +#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0 #define FMODE_FSNOTIFY_HSM(mode) 0 #endif @@ -526,7 +526,7 @@ struct address_space { /* * On most architectures that alignment is already the case; but * must be enforced here for CRIS, to let the least significant bit - * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON. + * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON. */ /* XArray tags, for tagging dirty and writeback pages in the pagecache. */ @@ -1043,6 +1043,7 @@ struct fown_struct { * and so were/are genuinely "ahead". Start next readahead when * the first of these pages is accessed. * @ra_pages: Maximum size of a readahead request, copied from the bdi. + * @order: Preferred folio order used for most recent readahead. * @mmap_miss: How many mmap accesses missed in the page cache. * @prev_pos: The last byte in the most recent read request. * @@ -1054,7 +1055,8 @@ struct file_ra_state { unsigned int size; unsigned int async_size; unsigned int ra_pages; - unsigned int mmap_miss; + unsigned short order; + unsigned short mmap_miss; loff_t prev_pos; }; @@ -3756,9 +3758,14 @@ void setattr_copy(struct mnt_idmap *, struct inode *inode, extern int file_update_time(struct file *file); +static inline bool file_is_dax(const struct file *file) +{ + return file && IS_DAX(file->f_mapping->host); +} + static inline bool vma_is_dax(const struct vm_area_struct *vma) { - return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host); + return file_is_dax(vma->vm_file); } static inline bool vma_is_fsdax(struct vm_area_struct *vma) diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 8d0e3ad89b94..10dd161690a2 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h @@ -332,12 +332,13 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) return (struct page *)page_private(bounce_page); } -static inline bool fscrypt_is_bounce_folio(struct folio *folio) +static inline bool fscrypt_is_bounce_folio(const struct folio *folio) { return folio->mapping == NULL; } -static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio) +static inline +struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio) { return bounce_folio->private; } @@ -517,12 +518,13 @@ static inline struct page *fscrypt_pagecache_page(struct page *bounce_page) return ERR_PTR(-EINVAL); } -static inline bool fscrypt_is_bounce_folio(struct folio *folio) +static inline bool fscrypt_is_bounce_folio(const struct folio *folio) { return false; } -static inline struct folio *fscrypt_pagecache_folio(struct folio *bounce_folio) +static inline +struct folio *fscrypt_pagecache_folio(const struct folio *bounce_folio) { WARN_ON_ONCE(1); return ERR_PTR(-EINVAL); diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 454d8e466958..28a9cb13fbfa 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h @@ -129,7 
+129,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask) #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -void file_set_fsnotify_mode_from_watchers(struct file *file); +int fsnotify_open_perm_and_set_mode(struct file *file); /* * fsnotify_file_area_perm - permission hook before access to file range @@ -147,9 +147,6 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS))) return 0; - if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode))) - return 0; - /* * read()/write() and other types of access generate pre-content events. */ @@ -160,7 +157,8 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, return ret; } - if (!(perm_mask & MAY_READ)) + if (!(perm_mask & MAY_READ) || + likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode))) return 0; /* @@ -208,28 +206,10 @@ static inline int fsnotify_file_perm(struct file *file, int perm_mask) return fsnotify_file_area_perm(file, perm_mask, NULL, 0); } -/* - * fsnotify_open_perm - permission hook before file open - */ -static inline int fsnotify_open_perm(struct file *file) -{ - int ret; - - if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode))) - return 0; - - if (file->f_flags & __FMODE_EXEC) { - ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM); - if (ret) - return ret; - } - - return fsnotify_path(&file->f_path, FS_OPEN_PERM); -} - #else -static inline void file_set_fsnotify_mode_from_watchers(struct file *file) +static inline int fsnotify_open_perm_and_set_mode(struct file *file) { + return 0; } static inline int fsnotify_file_area_perm(struct file *file, int perm_mask, @@ -253,11 +233,6 @@ static inline int fsnotify_file_perm(struct file *file, int perm_mask) { return 0; } - -static inline int fsnotify_open_perm(struct file *file) -{ - return 0; -} #endif /* diff --git a/include/linux/gcd.h b/include/linux/gcd.h index cb572677fd7f..616e81a7f7e3 100644 --- a/include/linux/gcd.h +++ b/include/linux/gcd.h @@ -3,6 +3,9 @@ #define _GCD_H #include <linux/compiler.h> +#include <linux/jump_label.h> + +DECLARE_STATIC_KEY_TRUE(efficient_ffs_key); unsigned long gcd(unsigned long a, unsigned long b) __attribute_const__; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index be160e8d8bcb..5ebf26fcdcfa 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -423,9 +423,14 @@ static inline bool gfp_compaction_allowed(gfp_t gfp_mask) extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma); #ifdef CONFIG_CONTIG_ALLOC + +typedef unsigned int __bitwise acr_flags_t; +#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request +#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA + /* The below functions must be run on a range from a single zone. */ extern int alloc_contig_range_noprof(unsigned long start, unsigned long end, - unsigned migratetype, gfp_t gfp_mask); + acr_flags_t alloc_flags, gfp_t gfp_mask); #define alloc_contig_range(...) 
alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__)) extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask, diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 4b984e8f8fcd..667f8fd58a79 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h @@ -347,12 +347,10 @@ struct gpio_irq_chip { * @get: returns value for signal "offset", 0=low, 1=high, or negative error * @get_multiple: reads values for multiple signals defined by "mask" and * stores them in "bits", returns 0 on success or negative error - * @set: **DEPRECATED** - please use set_rv() instead - * @set_multiple: **DEPRECATED** - please use set_multiple_rv() instead - * @set_rv: assigns output value for signal "offset", returns 0 on success or - * negative error value - * @set_multiple_rv: assigns output values for multiple signals defined by - * "mask", returns 0 on success or negative error value + * @set: assigns output value for signal "offset", returns 0 on success or + * negative error value + * @set_multiple: assigns output values for multiple signals defined by + * "mask", returns 0 on success or negative error value * @set_config: optional hook for all kinds of settings. Uses the same * packed config format as generic pinconf. Must return 0 on success and * a negative error number on failure. @@ -445,17 +443,11 @@ struct gpio_chip { int (*get_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); - void (*set)(struct gpio_chip *gc, - unsigned int offset, int value); - void (*set_multiple)(struct gpio_chip *gc, + int (*set)(struct gpio_chip *gc, + unsigned int offset, int value); + int (*set_multiple)(struct gpio_chip *gc, unsigned long *mask, unsigned long *bits); - int (*set_rv)(struct gpio_chip *gc, - unsigned int offset, - int value); - int (*set_multiple_rv)(struct gpio_chip *gc, - unsigned long *mask, - unsigned long *bits); int (*set_config)(struct gpio_chip *gc, unsigned int offset, unsigned long config); diff --git a/include/linux/gpio/generic.h b/include/linux/gpio/generic.h index b511acd58ab0..f3a8db4598bb 100644 --- a/include/linux/gpio/generic.h +++ b/include/linux/gpio/generic.h @@ -88,10 +88,10 @@ static inline int gpio_generic_chip_set(struct gpio_generic_chip *chip, unsigned int offset, int value) { - if (WARN_ON(!chip->gc.set_rv)) + if (WARN_ON(!chip->gc.set)) return -EOPNOTSUPP; - return chip->gc.set_rv(&chip->gc, offset, value); + return chip->gc.set(&chip->gc, offset, value); } #define gpio_generic_chip_lock(gen_gc) \ diff --git a/include/linux/hid.h b/include/linux/hid.h index 568a9d8c749b..2cc4f1e4ea96 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -1216,7 +1216,11 @@ static inline void hid_hw_wait(struct hid_device *hdev) /** * hid_report_len - calculate the report length * - * @report: the report we want to know the length + * @report: the report whose length we want to know + * + * The length counts the report ID byte, but only if the ID is nonzero + * and therefore is included in the report. Reports whose ID is zero + * never include an ID byte. */ static inline u32 hid_report_len(struct hid_report *report) { @@ -1239,6 +1243,8 @@ void hid_quirks_exit(__u16 bus); dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_warn(hid, fmt, ...) \ dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__) +#define hid_warn_ratelimited(hid, fmt, ...) \ + dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_info(hid, fmt, ...) \ dev_info(&(hid)->dev, fmt, ##__VA_ARGS__) #define hid_dbg(hid, fmt, ...) 
\ diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h index 9a7683d79a4b..36053c3d6d64 100644 --- a/include/linux/highmem-internal.h +++ b/include/linux/highmem-internal.h @@ -195,7 +195,7 @@ static inline void *kmap_local_page_try_from_panic(struct page *page) static inline void *kmap_local_folio(struct folio *folio, size_t offset) { - return page_address(&folio->page) + offset; + return folio_address(folio) + offset; } static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot) diff --git a/include/linux/highmem.h b/include/linux/highmem.h index e48d7f27b0b9..6234f316468c 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -292,12 +292,6 @@ static inline void zero_user_segment(struct page *page, zero_user_segments(page, start, end, 0, 0); } -static inline void zero_user(struct page *page, - unsigned start, unsigned size) -{ - zero_user_segments(page, start, start + size, 0, 0); -} - #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE static inline void copy_user_highpage(struct page *to, struct page *from, @@ -688,10 +682,4 @@ static inline void folio_release_kmap(struct folio *folio, void *addr) kunmap_local(addr); folio_put(folio); } - -static inline void unmap_and_put_page(struct page *page, void *addr) -{ - folio_release_kmap(page_folio(page), addr); -} - #endif /* _LINUX_HIGHMEM_H */ diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h index 99fcf65d575f..0c4c84b8c3be 100644 --- a/include/linux/hisi_acc_qm.h +++ b/include/linux/hisi_acc_qm.h @@ -556,9 +556,9 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue, struct hisi_acc_sgl_pool; struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev, struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool, - u32 index, dma_addr_t *hw_sgl_dma); + u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir); void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl, - struct hisi_acc_hw_sgl *hw_sgl); + struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir); struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev, u32 count, u32 sge_nr); void hisi_acc_free_sgl_pool(struct device *dev, diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h index 2f190c90192d..7748489fde1b 100644 --- a/include/linux/huge_mm.h +++ b/include/linux/huge_mm.h @@ -37,8 +37,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr, pgprot_t newprot, unsigned long cp_flags); -vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write); -vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write); +vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn, + bool write); +vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn, + bool write); vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio, bool write); vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio, @@ -261,7 +263,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, } unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, - unsigned long vm_flags, + vm_flags_t vm_flags, unsigned long tva_flags, unsigned long orders); @@ -282,7 +284,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma, */ static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, - unsigned long vm_flags, + vm_flags_t vm_flags, unsigned long tva_flags, unsigned 
long orders) { @@ -317,7 +319,7 @@ struct thpsize { (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG)) static inline bool vma_thp_disabled(struct vm_area_struct *vma, - unsigned long vm_flags) + vm_flags_t vm_flags) { /* * Explicitly disabled through madvise or prctl, or some @@ -400,8 +402,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, #define split_huge_pmd(__vma, __pmd, __address) \ do { \ pmd_t *____pmd = (__pmd); \ - if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \ - || pmd_devmap(*____pmd)) \ + if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)) \ __split_huge_pmd(__vma, __pmd, __address, \ false); \ } while (0) @@ -426,16 +427,14 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, #define split_huge_pud(__vma, __pud, __address) \ do { \ pud_t *____pud = (__pud); \ - if (pud_trans_huge(*____pud) \ - || pud_devmap(*____pud)) \ + if (pud_trans_huge(*____pud)) \ __split_huge_pud(__vma, __pud, __address); \ } while (0) -int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags, +int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags, int advice); -int madvise_collapse(struct vm_area_struct *vma, - struct vm_area_struct **prev, - unsigned long start, unsigned long end); +int madvise_collapse(struct vm_area_struct *vma, unsigned long start, + unsigned long end, bool *lock_dropped); void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start, unsigned long end, struct vm_area_struct *next); spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma); @@ -450,7 +449,7 @@ static inline int is_swap_pmd(pmd_t pmd) static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma) { - if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) + if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd)) return __pmd_trans_huge_lock(pmd, vma); else return NULL; @@ -458,7 +457,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd, static inline spinlock_t *pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma) { - if (pud_trans_huge(*pud) || pud_devmap(*pud)) + if (pud_trans_huge(*pud)) return __pud_trans_huge_lock(pud, vma); else return NULL; @@ -473,9 +472,6 @@ static inline bool folio_test_pmd_mappable(struct folio *folio) return folio_order(folio) >= HPAGE_PMD_ORDER; } -struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr, - pmd_t *pmd, int flags, struct dev_pagemap **pgmap); - vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf); extern struct folio *huge_zero_folio; @@ -486,9 +482,14 @@ static inline bool is_huge_zero_folio(const struct folio *folio) return READ_ONCE(huge_zero_folio) == folio; } +static inline bool is_huge_zero_pfn(unsigned long pfn) +{ + return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1)); +} + static inline bool is_huge_zero_pmd(pmd_t pmd) { - return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd); + return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd)); } struct folio *mm_get_huge_zero_folio(struct mm_struct *mm); @@ -524,7 +525,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma, } static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma, - unsigned long vm_flags, + vm_flags_t vm_flags, unsigned long tva_flags, unsigned long orders) { @@ -593,14 +594,14 @@ static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma, do { } while (0) static inline int hugepage_madvise(struct vm_area_struct *vma, - unsigned long *vm_flags, int 
advice) + vm_flags_t *vm_flags, int advice) { return -EINVAL; } static inline int madvise_collapse(struct vm_area_struct *vma, - struct vm_area_struct **prev, - unsigned long start, unsigned long end) + unsigned long start, + unsigned long end, bool *lock_dropped) { return -EINVAL; } @@ -636,6 +637,11 @@ static inline bool is_huge_zero_folio(const struct folio *folio) return false; } +static inline bool is_huge_zero_pfn(unsigned long pfn) +{ + return false; +} + static inline bool is_huge_zero_pmd(pmd_t pmd) { return false; diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 42f374e828a2..526d27e88b3b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -149,7 +149,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte, uffd_flags_t flags, struct folio **foliop); #endif /* CONFIG_USERFAULTFD */ -bool hugetlb_reserve_pages(struct inode *inode, long from, long to, +long hugetlb_reserve_pages(struct inode *inode, long from, long to, struct vm_area_struct *vma, vm_flags_t vm_flags); long hugetlb_unreserve_pages(struct inode *inode, long start, long end, @@ -359,12 +359,6 @@ static inline void hugetlb_show_meminfo_node(int nid) { } -static inline int prepare_hugepage_range(struct file *file, - unsigned long addr, unsigned long len) -{ - return -EINVAL; -} - static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma) { } @@ -396,13 +390,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm, return 0; } -static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, - unsigned long addr, unsigned long end, - unsigned long floor, unsigned long ceiling) -{ - BUG(); -} - #ifdef CONFIG_USERFAULTFD static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte, struct vm_area_struct *dst_vma, @@ -740,6 +727,11 @@ extern unsigned int default_hstate_idx; #define default_hstate (hstates[default_hstate_idx]) +static inline struct hugepage_subpool *subpool_inode(struct inode *inode) +{ + return HUGETLBFS_SB(inode->i_sb)->spool; +} + static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) { return folio->_hugetlb_subpool; diff --git a/include/linux/hung_task.h b/include/linux/hung_task.h index 1bc2b3244613..34e615c76ca5 100644 --- a/include/linux/hung_task.h +++ b/include/linux/hung_task.h @@ -21,17 +21,17 @@ * type. 
* * Type encoding: - * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX) - * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM) - * 10 - Blocked on rt-mutex (BLOCKER_TYPE_RTMUTEX) - * 11 - Blocked on rw-semaphore (BLOCKER_TYPE_RWSEM) + * 00 - Blocked on mutex (BLOCKER_TYPE_MUTEX) + * 01 - Blocked on semaphore (BLOCKER_TYPE_SEM) + * 10 - Blocked on rw-semaphore as READER (BLOCKER_TYPE_RWSEM_READER) + * 11 - Blocked on rw-semaphore as WRITER (BLOCKER_TYPE_RWSEM_WRITER) */ -#define BLOCKER_TYPE_MUTEX 0x00UL -#define BLOCKER_TYPE_SEM 0x01UL -#define BLOCKER_TYPE_RTMUTEX 0x02UL -#define BLOCKER_TYPE_RWSEM 0x03UL +#define BLOCKER_TYPE_MUTEX 0x00UL +#define BLOCKER_TYPE_SEM 0x01UL +#define BLOCKER_TYPE_RWSEM_READER 0x02UL +#define BLOCKER_TYPE_RWSEM_WRITER 0x03UL -#define BLOCKER_TYPE_MASK 0x03UL +#define BLOCKER_TYPE_MASK 0x03UL #ifdef CONFIG_DETECT_HUNG_TASK_BLOCKER static inline void hung_task_set_blocker(void *lock, unsigned long type) diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h index 9efbc54e35e5..be5417303ecf 100644 --- a/include/linux/hypervisor.h +++ b/include/linux/hypervisor.h @@ -37,6 +37,9 @@ static inline bool hypervisor_isolated_pci_functions(void) if (IS_ENABLED(CONFIG_S390)) return true; + if (IS_ENABLED(CONFIG_LOONGARCH)) + return true; + return jailhouse_paravirt(); } diff --git a/include/linux/i3c/device.h b/include/linux/i3c/device.h index b674f64d0822..7f136de4b73e 100644 --- a/include/linux/i3c/device.h +++ b/include/linux/i3c/device.h @@ -245,7 +245,7 @@ void i3c_driver_unregister(struct i3c_driver *drv); * * Return: 0 if both registrations succeeds, a negative error code otherwise. */ -static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv, +static __always_inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv, struct i2c_driver *i2cdrv) { int ret; @@ -270,7 +270,7 @@ static inline int i3c_i2c_driver_register(struct i3c_driver *i3cdrv, * Note that when CONFIG_I3C is not enabled, this function only unregisters the * @i2cdrv. */ -static inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, +static __always_inline void i3c_i2c_driver_unregister(struct i3c_driver *i3cdrv, struct i2c_driver *i2cdrv) { if (IS_ENABLED(CONFIG_I3C)) diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h index c67922ece617..043f5c7ff398 100644 --- a/include/linux/i3c/master.h +++ b/include/linux/i3c/master.h @@ -249,10 +249,15 @@ struct i3c_device { */ #define I3C_BUS_MAX_DEVS 11 -#define I3C_BUS_MAX_I3C_SCL_RATE 12900000 -#define I3C_BUS_TYP_I3C_SCL_RATE 12500000 -#define I3C_BUS_I2C_FM_PLUS_SCL_RATE 1000000 -#define I3C_BUS_I2C_FM_SCL_RATE 400000 +/* Taken from the I3C Spec V1.1.1, chapter 6.2. 
"Timing specification" */ +#define I3C_BUS_I2C_FM_PLUS_SCL_MAX_RATE 1000000 +#define I3C_BUS_I2C_FM_SCL_MAX_RATE 400000 +#define I3C_BUS_I3C_SCL_MAX_RATE 12900000 +#define I3C_BUS_I3C_SCL_TYP_RATE 12500000 +#define I3C_BUS_TAVAL_MIN_NS 1000 +#define I3C_BUS_TBUF_MIXED_FM_MIN_NS 1300 +#define I3C_BUS_THIGH_MIXED_MAX_NS 41 +#define I3C_BUS_TIDLE_MIN_NS 200000 #define I3C_BUS_TLOW_OD_MIN_NS 200 /** diff --git a/include/linux/input/touch-overlay.h b/include/linux/input/touch-overlay.h new file mode 100644 index 000000000000..0253e554d3cd --- /dev/null +++ b/include/linux/input/touch-overlay.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023 Javier Carrasco <javier.carrasco@wolfvision.net> + */ + +#ifndef _TOUCH_OVERLAY +#define _TOUCH_OVERLAY + +#include <linux/types.h> + +struct input_dev; + +int touch_overlay_map(struct list_head *list, struct input_dev *input); + +void touch_overlay_get_touchscreen_abs(struct list_head *list, u16 *x, u16 *y); + +bool touch_overlay_mapped_touchscreen(struct list_head *list); + +bool touch_overlay_process_contact(struct list_head *list, + struct input_dev *input, + struct input_mt_pos *pos, int slot); + +void touch_overlay_sync_frame(struct list_head *list, struct input_dev *input); + +#endif diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 7376c1df9c90..c16353cc6e3c 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h @@ -225,7 +225,4 @@ io_mapping_free(struct io_mapping *iomap) kfree(iomap); } -int io_mapping_map_user(struct io_mapping *iomap, struct vm_area_struct *vma, - unsigned long addr, unsigned long pfn, unsigned long size); - #endif /* _LINUX_IO_MAPPING_H */ diff --git a/include/linux/iommu.h b/include/linux/iommu.h index 7073be1d8841..c30d12e16473 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h @@ -14,6 +14,7 @@ #include <linux/err.h> #include <linux/of.h> #include <linux/iova_bitmap.h> +#include <uapi/linux/iommufd.h> #define IOMMU_READ (1 << 0) #define IOMMU_WRITE (1 << 1) @@ -558,12 +559,52 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, } /** + * __iommu_copy_struct_to_user - Report iommu driver specific user space data + * @dst_data: Pointer to a struct iommu_user_data for user space data location + * @src_data: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @data_type: The data type of the @src_data. Must match with @dst_data.type + * @data_len: Length of current user data structure, i.e. sizeof(struct _src) + * @min_len: Initial length of user data structure for backward compatibility. 
+ * This should be the offsetofend() of the last member of the user + * data struct that was initially added to include/uapi/linux/iommufd.h + */ +static inline int +__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data, + void *src_data, unsigned int data_type, + size_t data_len, size_t min_len) +{ + if (WARN_ON(!dst_data || !src_data)) + return -EINVAL; + if (dst_data->type != data_type) + return -EINVAL; + if (dst_data->len < min_len || data_len < dst_data->len) + return -EINVAL; + return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data, + data_len, NULL); +} + +/** + * iommu_copy_struct_to_user - Report iommu driver specific user space data + * @user_data: Pointer to a struct iommu_user_data for user space data location + * @ksrc: Pointer to an iommu driver specific user data that is defined in + * include/uapi/linux/iommufd.h + * @data_type: The data type of the @ksrc. Must match with @user_data->type + * @min_last: The last member of the data structure that @ksrc pointed to in + * its initial version. + * Return: 0 on success, otherwise -error. + */ +#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \ + __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \ + offsetofend(typeof(*ksrc), min_last)) + +/** * struct iommu_ops - iommu ops and capabilities * @capable: check capability * @hw_info: report iommu hardware information. The data buffer returned by this * op is allocated in the iommu driver and freed by the caller after - * use. The information type is one of enum iommu_hw_info_type defined - * in include/uapi/linux/iommufd.h. + * use. On input, @type carries the requested type; on output, it + * carries the supported type. Drivers should reject an unsupported + * data @type. * @domain_alloc: Do not use in new drivers * @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to * use identity_domain instead. This should only be used @@ -596,14 +637,16 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, * - IOMMU_DOMAIN_DMA: must use a dma domain * - 0: use the default setting * @default_domain_ops: the default ops for domains - * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind - * the @dev, as the set of virtualization resources shared/passed - * to user space IOMMU instance. And associate it with a nesting - * @parent_domain. The @viommu_type must be defined in the header - * include/uapi/linux/iommufd.h - * It is required to call iommufd_viommu_alloc() helper for - * a bundled allocation of the core and the driver structures, - * using the given @ictx pointer. + * @get_viommu_size: Get the size of a driver-level vIOMMU structure for a given + * @dev corresponding to @viommu_type. The driver should return + * 0 if the vIOMMU type isn't supported. The driver is required + * to use the VIOMMU_STRUCT_SIZE macro to sanity-check the + * layout of the driver-level vIOMMU structure against the + * core one + * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical + * IOMMU instance @viommu->iommu_dev, as the set of virtualization + * resources shared/passed to a user space IOMMU instance. + * Associate it with a nesting @parent_domain. The driver is + * required to set @viommu->ops to point at its own viommu_ops * @owner: Driver module providing these ops * @identity_domain: An always available, always attachable identity * translation. 
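/*
 * [Editor's sketch, not part of the commit] A minimal use of the new
 * iommu_copy_struct_to_user() helper above. The mydrv_* names, the
 * struct iommu_mydrv_data layout and the IOMMU_MYDRV_DATA type value
 * are hypothetical; only the helper and its @min_last contract come
 * from this patch.
 */
struct iommu_mydrv_data {
	__u32 flags;	/* hypothetical uAPI fields */
	__u32 reg_val;	/* last member of the initial uAPI version */
};

static int mydrv_report_data(const struct iommu_user_data *user_data)
{
	struct iommu_mydrv_data data = {
		.reg_val = 0x1234,
	};

	/*
	 * Fails with -EINVAL on a type mismatch or a bad length; user
	 * space that only knows the initial layout (ending at @reg_val)
	 * keeps working as the struct grows.
	 */
	return iommu_copy_struct_to_user(user_data, &data,
					 IOMMU_MYDRV_DATA, reg_val);
}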
@@ -619,7 +662,8 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size, */ struct iommu_ops { bool (*capable)(struct device *dev, enum iommu_cap); - void *(*hw_info)(struct device *dev, u32 *length, u32 *type); + void *(*hw_info)(struct device *dev, u32 *length, + enum iommu_hw_info_type *type); /* Domain allocation and freeing by the iommu driver */ #if IS_ENABLED(CONFIG_FSL_PAMU) @@ -653,9 +697,11 @@ struct iommu_ops { int (*def_domain_type)(struct device *dev); - struct iommufd_viommu *(*viommu_alloc)( - struct device *dev, struct iommu_domain *parent_domain, - struct iommufd_ctx *ictx, unsigned int viommu_type); + size_t (*get_viommu_size)(struct device *dev, + enum iommu_viommu_type viommu_type); + int (*viommu_init)(struct iommufd_viommu *viommu, + struct iommu_domain *parent_domain, + const struct iommu_user_data *user_data); const struct iommu_domain_ops *default_domain_ops; struct module *owner; diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h index 34b6e6ca4bfa..6e7efe83bc5d 100644 --- a/include/linux/iommufd.h +++ b/include/linux/iommufd.h @@ -37,6 +37,7 @@ enum iommufd_object_type { IOMMUFD_OBJ_VIOMMU, IOMMUFD_OBJ_VDEVICE, IOMMUFD_OBJ_VEVENTQ, + IOMMUFD_OBJ_HW_QUEUE, #ifdef CONFIG_IOMMUFD_TEST IOMMUFD_OBJ_SELFTEST, #endif @@ -45,7 +46,13 @@ enum iommufd_object_type { /* Base struct for all objects with a userspace ID handle. */ struct iommufd_object { - refcount_t shortterm_users; + /* + * Destroy will sleep and wait for wait_cnt to go to zero. This allows + * concurrent users of the ID to reliably avoid causing a spurious + * destroy failure. Anything incrementing this count should either be + * short-lived or be revoked and blocked during pre_destroy(). + */ + refcount_t wait_cnt; refcount_t users; enum iommufd_object_type type; unsigned int id; @@ -101,7 +108,36 @@ struct iommufd_viommu { struct list_head veventqs; struct rw_semaphore veventqs_rwsem; - unsigned int type; + enum iommu_viommu_type type; }; + +struct iommufd_vdevice { + struct iommufd_object obj; + struct iommufd_viommu *viommu; + struct iommufd_device *idev; + + /* + * Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID of + * AMD IOMMU, and vRID of Intel VT-d + */ + u64 virt_id; + + /* Clean up all driver-specific parts of an iommufd_vdevice */ + void (*destroy)(struct iommufd_vdevice *vdev); +}; + +struct iommufd_hw_queue { + struct iommufd_object obj; + struct iommufd_viommu *viommu; + struct iommufd_access *access; + + u64 base_addr; /* in guest physical address space */ + size_t length; + + enum iommu_hw_queue_type type; + + /* Clean up all driver-specific parts of an iommufd_hw_queue */ + void (*destroy)(struct iommufd_hw_queue *hw_queue); }; /** @@ -120,6 +156,30 @@ struct iommufd_viommu { * array->entry_num to report the number of handled requests. * The data structure of the array entry must be defined in * include/uapi/linux/iommufd.h + * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU + * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, and + * any related HW procedure. 
@vdev is already initialized by iommufd + core: vdev->dev and vdev->viommu pointers; vdev->id carries a + per-vIOMMU virtual ID (refer to struct iommu_vdevice_alloc in + include/uapi/linux/iommufd.h). + If the driver has a deinit function to revert what the + vdevice_init op does, it should set it to the @vdev->destroy + function pointer + @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a + given @viommu corresponding to @queue_type. The driver should + return 0 if the HW queue type isn't supported. The driver is + required to use the HW_QUEUE_STRUCT_SIZE macro to sanity-check + the layout of the driver-level HW queue structure against the + core one + @hw_queue_init_phys: Initialize the driver-level structure of an HW queue whose + core-level structure is already initialized and holds all + the info about the guest queue memory. + A driver providing this op indicates that the HW accesses + the guest queue memory via physical addresses. + @index carries the logical HW QUEUE ID per vIOMMU in a + guest VM, for a multi-queue model. @base_addr_pa carries + the physical location of the guest queue. + If the driver has a deinit function to revert what this op + does, it should set it to the @hw_queue->destroy pointer */ struct iommufd_viommu_ops { void (*destroy)(struct iommufd_viommu *viommu); @@ -128,6 +188,13 @@ struct iommufd_viommu_ops { const struct iommu_user_data *user_data); int (*cache_invalidate)(struct iommufd_viommu *viommu, struct iommu_user_data_array *array); + const size_t vdevice_size; + int (*vdevice_init)(struct iommufd_vdevice *vdev); + size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu, + enum iommu_hw_queue_type queue_type); + /* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */ + int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index, + phys_addr_t base_addr_pa); }; #if IS_ENABLED(CONFIG_IOMMUFD) @@ -171,8 +238,9 @@ static inline void iommufd_access_unpin_pages(struct iommufd_access *access, { } -static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova, - void *data, size_t len, unsigned int flags) +static inline int iommufd_access_rw(struct iommufd_access *access, + unsigned long iova, void *data, size_t len, + unsigned int flags) { return -EOPNOTSUPP; } @@ -189,9 +257,16 @@ static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx) #endif /* CONFIG_IOMMUFD */ #if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE) -struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx, - size_t size, - enum iommufd_object_type type); +int _iommufd_object_depend(struct iommufd_object *obj_dependent, + struct iommufd_object *obj_depended); +void _iommufd_object_undepend(struct iommufd_object *obj_dependent, + struct iommufd_object *obj_depended); +int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner, + phys_addr_t mmio_addr, size_t length, + unsigned long *offset); +void _iommufd_destroy_mmap(struct iommufd_ctx *ictx, + struct iommufd_object *owner, unsigned long offset); +struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev); struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu, unsigned long vdev_id); int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu, @@ -200,11 +275,36 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu, enum iommu_veventq_type type, void *event_data, size_t data_len); #else /* !CONFIG_IOMMUFD_DRIVER_CORE */ -static inline struct iommufd_object * 
-_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size, - enum iommufd_object_type type) +static inline int _iommufd_object_depend(struct iommufd_object *obj_dependent, + struct iommufd_object *obj_depended) +{ + return -EOPNOTSUPP; +} + +static inline void +_iommufd_object_undepend(struct iommufd_object *obj_dependent, + struct iommufd_object *obj_depended) +{ +} + +static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, + struct iommufd_object *owner, + phys_addr_t mmio_addr, size_t length, + unsigned long *offset) +{ + return -EOPNOTSUPP; +} + +static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx, + struct iommufd_object *owner, + unsigned long offset) { - return ERR_PTR(-EOPNOTSUPP); +} + +static inline struct device * +iommufd_vdevice_to_device(struct iommufd_vdevice *vdev) +{ + return NULL; } static inline struct device * @@ -228,21 +328,73 @@ static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu, } #endif /* CONFIG_IOMMUFD_DRIVER_CORE */ +#define VIOMMU_STRUCT_SIZE(drv_struct, member) \ + (sizeof(drv_struct) + \ + BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \ + BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_viommu, \ + ((drv_struct *)NULL)->member))) + +#define VDEVICE_STRUCT_SIZE(drv_struct, member) \ + (sizeof(drv_struct) + \ + BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \ + BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_vdevice, \ + ((drv_struct *)NULL)->member))) + +#define HW_QUEUE_STRUCT_SIZE(drv_struct, member) \ + (sizeof(drv_struct) + \ + BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \ + BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_hw_queue, \ + ((drv_struct *)NULL)->member))) + /* - * Helpers for IOMMU driver to allocate driver structures that will be freed by - * the iommufd core. The free op will be called prior to freeing the memory. + * Helpers for an IOMMU driver to build/destroy a dependency between two + * sibling structures created by one of the allocators above. */ -#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops) \ +#define iommufd_hw_queue_depend(dependent, depended, member) \ ({ \ - drv_struct *ret; \ + int ret = -EINVAL; \ \ - static_assert(__same_type(struct iommufd_viommu, \ - ((drv_struct *)NULL)->member)); \ - static_assert(offsetof(drv_struct, member.obj) == 0); \ - ret = (drv_struct *)_iommufd_object_alloc( \ - ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU); \ - if (!IS_ERR(ret)) \ - ret->member.ops = viommu_ops; \ + static_assert(__same_type(struct iommufd_hw_queue, \ + dependent->member)); \ + static_assert(__same_type(typeof(*dependent), *depended)); \ + if (!WARN_ON_ONCE(dependent->member.viommu != \ + depended->member.viommu)) \ + ret = _iommufd_object_depend(&dependent->member.obj, \ + &depended->member.obj); \ ret; \ }) + +#define iommufd_hw_queue_undepend(dependent, depended, member) \ + ({ \ + static_assert(__same_type(struct iommufd_hw_queue, \ + dependent->member)); \ + static_assert(__same_type(typeof(*dependent), *depended)); \ + WARN_ON_ONCE(dependent->member.viommu != \ + depended->member.viommu); \ + _iommufd_object_undepend(&dependent->member.obj, \ + &depended->member.obj); \ + }) + +/* + * Helpers for an IOMMU driver to alloc/destroy an mmappable area for a + * structure. + * + * To support an mmappable MMIO region, the kernel driver must first register + * it with the iommufd core to allocate an @offset during driver-structure + * initialization (e.g. the viommu_init op). Then, it should report this + * @offset and the @length of the MMIO region to user space for the mmap + * syscall. 
+ */ +static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu, + phys_addr_t mmio_addr, + size_t length, + unsigned long *offset) +{ + return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr, + length, offset); +} + +static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu, + unsigned long offset) +{ + _iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset); +} #endif diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index b25377b6ea98..5210e8371238 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -60,7 +60,8 @@ static inline int __get_task_ioprio(struct task_struct *p) int prio; if (!ioc) - return IOPRIO_DEFAULT; + return IOPRIO_PRIO_VALUE(task_nice_ioclass(p), + task_nice_ioprio(p)); if (p != current) lockdep_assert_held(&p->alloc_lock); diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h index 4fb2f93d82ed..d643c7c87822 100644 --- a/include/linux/irq-entry-common.h +++ b/include/linux/irq-entry-common.h @@ -7,6 +7,7 @@ #include <linux/context_tracking.h> #include <linux/tick.h> #include <linux/kmsan.h> +#include <linux/unwind_deferred.h> #include <asm/entry-common.h> @@ -256,6 +257,7 @@ static __always_inline void exit_to_user_mode(void) lockdep_hardirqs_on_prepare(); instrumentation_end(); + unwind_reset_info(); user_enter_irqoff(); arch_exit_to_user_mode(); lockdep_hardirqs_on(CALLER_ADDR0); diff --git a/include/linux/jhash.h b/include/linux/jhash.h index fa26a2dd3b52..7c1c1821c694 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h @@ -24,7 +24,7 @@ * Jozsef */ #include <linux/bitops.h> -#include <linux/unaligned/packed_struct.h> +#include <linux/unaligned.h> /* Best hash sizes are of power of two */ #define jhash_size(n) ((u32)1<<(n)) @@ -77,9 +77,9 @@ static inline u32 jhash(const void *key, u32 length, u32 initval) /* All but the last block: affect some 32 bits of (a,b,c) */ while (length > 12) { - a += __get_unaligned_cpu32(k); - b += __get_unaligned_cpu32(k + 4); - c += __get_unaligned_cpu32(k + 8); + a += get_unaligned((u32 *)k); + b += get_unaligned((u32 *)(k + 4)); + c += get_unaligned((u32 *)(k + 8)); __jhash_mix(a, b, c); length -= 12; k += 12; diff --git a/include/linux/kexec.h b/include/linux/kexec.h index 03f85ad03025..1b10a5d84b68 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -79,6 +79,12 @@ extern note_buf_t __percpu *crash_notes; typedef unsigned long kimage_entry_t; +/* + * This is a copy of the UAPI struct kexec_segment and must be identical + * to it because it gets copied straight from user space into kernel + * memory. Do not modify this structure unless you change the way segments + * get ingested from user space. + */ struct kexec_segment { /* * This pointer can point to user memory if kexec_load() system @@ -172,6 +178,7 @@ int kexec_image_post_load_cleanup_default(struct kimage *image); * @buf_align: Minimum alignment needed. * @buf_min: The buffer can't be placed below this address. * @buf_max: The buffer can't be placed above this address. + * @cma: CMA page if the buffer is backed by CMA. * @top_down: Allocate from top of memory. * @random: Place the buffer at a random position. 
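/*
 * [Editor's sketch, not part of the commit] How the new vIOMMU pieces
 * fit together: the driver embeds the core iommufd_viommu as the first
 * member of its own structure, sizes it via VIOMMU_STRUCT_SIZE() from
 * the get_viommu_size op, and completes setup in viommu_init, here
 * including an mmappable MMIO page. All mydrv_* names, the fixed MMIO
 * address and the use of IOMMU_VIOMMU_TYPE_DEFAULT are illustrative
 * assumptions.
 */
static const struct iommufd_viommu_ops mydrv_viommu_ops;

struct mydrv_viommu {
	struct iommufd_viommu core;	/* must be at offset 0 */
	unsigned long mmap_offset;	/* hypothetical driver state */
};

static size_t mydrv_get_viommu_size(struct device *dev,
				    enum iommu_viommu_type viommu_type)
{
	if (viommu_type != IOMMU_VIOMMU_TYPE_DEFAULT)
		return 0;	/* report "type not supported" */
	return VIOMMU_STRUCT_SIZE(struct mydrv_viommu, core);
}

static int mydrv_viommu_init(struct iommufd_viommu *viommu,
			     struct iommu_domain *parent_domain,
			     const struct iommu_user_data *user_data)
{
	struct mydrv_viommu *mv =
		container_of(viommu, struct mydrv_viommu, core);
	phys_addr_t mmio_pa = 0xfed90000;	/* placeholder address */

	viommu->ops = &mydrv_viommu_ops;	/* required by the contract */

	/*
	 * Register one MMIO page; @mv->mmap_offset must then be reported
	 * to user space by driver-specific means so it can mmap() it.
	 */
	return iommufd_viommu_alloc_mmap(viommu, mmio_pa, PAGE_SIZE,
					 &mv->mmap_offset);
}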
*/ @@ -184,6 +191,7 @@ struct kexec_buf { unsigned long buf_align; unsigned long buf_min; unsigned long buf_max; + struct page *cma; bool top_down; #ifdef CONFIG_CRASH_DUMP bool random; @@ -340,6 +348,7 @@ struct kimage { unsigned long nr_segments; struct kexec_segment segment[KEXEC_SEGMENT_MAX]; + struct page *segment_cma[KEXEC_SEGMENT_MAX]; struct list_head control_pages; struct list_head dest_pages; @@ -361,6 +370,7 @@ struct kimage { */ unsigned int hotplug_support:1; #endif + unsigned int no_cma:1; #ifdef ARCH_HAS_KIMAGE_ARCH struct kimage_arch arch; diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h index b8d69cfbb58b..ff6120463745 100644 --- a/include/linux/khugepaged.h +++ b/include/linux/khugepaged.h @@ -12,7 +12,7 @@ extern int start_stop_khugepaged(void); extern void __khugepaged_enter(struct mm_struct *mm); extern void __khugepaged_exit(struct mm_struct *mm); extern void khugepaged_enter_vma(struct vm_area_struct *vma, - unsigned long vm_flags); + vm_flags_t vm_flags); extern void khugepaged_min_free_kbytes_update(void); extern bool current_is_khugepaged(void); extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr, @@ -37,7 +37,7 @@ static inline void khugepaged_exit(struct mm_struct *mm) { } static inline void khugepaged_enter_vma(struct vm_area_struct *vma, - unsigned long vm_flags) + vm_flags_t vm_flags) { } static inline int collapse_pte_mapped_thp(struct mm_struct *mm, diff --git a/include/linux/ksm.h b/include/linux/ksm.h index d73095b5cd96..c17b955e7b0b 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -16,9 +16,9 @@ #ifdef CONFIG_KSM int ksm_madvise(struct vm_area_struct *vma, unsigned long start, - unsigned long end, int advice, unsigned long *vm_flags); - -void ksm_add_vma(struct vm_area_struct *vma); + unsigned long end, int advice, vm_flags_t *vm_flags); +vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file, + vm_flags_t vm_flags); int ksm_enable_merge_any(struct mm_struct *mm); int ksm_disable_merge_any(struct mm_struct *mm); int ksm_disable(struct mm_struct *mm); @@ -97,8 +97,10 @@ bool ksm_process_mergeable(struct mm_struct *mm); #else /* !CONFIG_KSM */ -static inline void ksm_add_vma(struct vm_area_struct *vma) +static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm, + const struct file *file, vm_flags_t vm_flags) { + return vm_flags; } static inline int ksm_disable(struct mm_struct *mm) @@ -131,7 +133,7 @@ static inline void collect_procs_ksm(const struct folio *folio, #ifdef CONFIG_MMU static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, - unsigned long end, int advice, unsigned long *vm_flags) + unsigned long end, int advice, vm_flags_t *vm_flags) { return 0; } diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index 21ec856c36bc..775a96217518 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h @@ -197,7 +197,7 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev); * @fled_cdev: the flash LED to set * @timeout: the flash timeout to set it to * - * Set the flash strobe duration. + * Set the flash strobe timeout. 
* * Returns: 0 on success or negative error value on failure */ diff --git a/include/linux/leds.h b/include/linux/leds.h index b3f0aa081064..b16b803cc1ac 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h @@ -294,7 +294,6 @@ void led_remove_lookup(struct led_lookup_data *led_lookup); struct led_classdev *__must_check led_get(struct device *dev, char *con_id); struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id); -extern struct led_classdev *of_led_get(struct device_node *np, int index); extern void led_put(struct led_classdev *led_cdev); struct led_classdev *__must_check devm_of_led_get(struct device *dev, int index); diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index e772aae71843..28f086c4a187 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h @@ -6,12 +6,12 @@ */ #ifndef __LIBNVDIMM_H__ #define __LIBNVDIMM_H__ -#include <linux/kernel.h> + +#include <linux/io.h> #include <linux/sizes.h> +#include <linux/spinlock.h> #include <linux/types.h> #include <linux/uuid.h> -#include <linux/spinlock.h> -#include <linux/bio.h> struct badrange_entry { u64 start; @@ -80,7 +80,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm, unsigned int cmd, void *buf, unsigned int buf_len, int *cmd_rc); +struct attribute_group; struct device_node; +struct module; struct nvdimm_bus_descriptor { const struct attribute_group **attr_groups; unsigned long cmd_mask; @@ -121,6 +123,8 @@ struct nd_mapping_desc { int position; }; +struct bio; +struct resource; struct nd_region; struct nd_region_desc { struct resource *res; @@ -147,8 +151,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev, return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0); } -struct nvdimm_bus; - /* * Note that separate bits for locked + unlocked are defined so that * 'flags == 0' corresponds to an error / not-supported state. @@ -238,6 +240,9 @@ struct nvdimm_fw_ops { int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg); }; +struct kobject; +struct nvdimm_bus; + void badrange_init(struct badrange *badrange); int badrange_add(struct badrange *badrange, u64 addr, u64 length); void badrange_forget(struct badrange *badrange, phys_addr_t start, diff --git a/include/linux/llist.h b/include/linux/llist.h index 27b17f64bcee..607b2360c938 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -83,7 +83,7 @@ static inline void init_llist_head(struct llist_head *list) */ static inline void init_llist_node(struct llist_node *node) { - node->next = node; + WRITE_ONCE(node->next, node); } /** @@ -97,7 +97,7 @@ static inline void init_llist_node(struct llist_node *node) */ static inline bool llist_on_list(const struct llist_node *node) { - return node->next != node; + return READ_ONCE(node->next) != node; } /** @@ -220,7 +220,7 @@ static inline bool llist_empty(const struct llist_head *head) static inline struct llist_node *llist_next(struct llist_node *node) { - return node->next; + return READ_ONCE(node->next); } /** diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h index 9ef129038224..bafe143b1f78 100644 --- a/include/linux/maple_tree.h +++ b/include/linux/maple_tree.h @@ -75,8 +75,8 @@ * searching for gaps or any other code that needs to find the end of the data. 
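/*
 * [Editor's note] The llist changes above only add READ_ONCE()/
 * WRITE_ONCE() so that init_llist_node(), llist_on_list() and
 * llist_next() are free of data races when another CPU may be updating
 * node->next concurrently. A short sketch of the self-pointing
 * "not on a list" convention they implement (struct item is
 * hypothetical):
 */
struct item {
	struct llist_node lnode;
	int payload;
};

static void item_mark_idle(struct item *it)
{
	/* Points lnode.next at itself: the node is on no list. */
	init_llist_node(&it->lnode);
}

static bool item_queued(struct item *it)
{
	/* True while the node sits on (or is being added to) an llist. */
	return llist_on_list(&it->lnode);
}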
*/ struct maple_metadata { - unsigned char end; - unsigned char gap; + unsigned char end; /* end of data */ + unsigned char gap; /* offset of largest gap */ }; /* diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 87b6688f124a..785173aa0739 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h @@ -251,8 +251,10 @@ struct mem_cgroup { * that this indicator should NOT be used in legacy cgroup mode * where socket memory is accounted/charged separately. */ - unsigned long socket_pressure; - + u64 socket_pressure; +#if BITS_PER_LONG < 64 + seqlock_t socket_pressure_seqlock; +#endif int kmemcg_id; /* * memcg->objcg is wiped out as a part of the objcg reparenting @@ -1602,6 +1604,42 @@ extern struct static_key_false memcg_sockets_enabled_key; #define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key) void mem_cgroup_sk_alloc(struct sock *sk); void mem_cgroup_sk_free(struct sock *sk); + +#if BITS_PER_LONG < 64 +static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg) +{ + u64 val = get_jiffies_64() + HZ; + unsigned long flags; + + write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags); + memcg->socket_pressure = val; + write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags); +} + +static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg) +{ + unsigned int seq; + u64 val; + + do { + seq = read_seqbegin(&memcg->socket_pressure_seqlock); + val = memcg->socket_pressure; + } while (read_seqretry(&memcg->socket_pressure_seqlock, seq)); + + return val; +} +#else +static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg) +{ + WRITE_ONCE(memcg->socket_pressure, jiffies + HZ); +} + +static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg) +{ + return READ_ONCE(memcg->socket_pressure); +} +#endif + static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) { #ifdef CONFIG_MEMCG_V1 @@ -1609,7 +1647,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) return !!memcg->tcpmem_pressure; #endif /* CONFIG_MEMCG_V1 */ do { - if (time_before(jiffies, READ_ONCE(memcg->socket_pressure))) + if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg))) return true; } while ((memcg = parent_mem_cgroup(memcg))); return false; diff --git a/include/linux/memfd.h b/include/linux/memfd.h index 246daadbfde8..6f606d9573c3 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -14,7 +14,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); * We also update VMA flags if appropriate by manipulating the VMA flags pointed * to by vm_flags_ptr. */ -int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr); +int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -25,7 +25,7 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx) return ERR_PTR(-EINVAL); } static inline int memfd_check_seals_mmap(struct file *file, - unsigned long *vm_flags_ptr) + vm_flags_t *vm_flags_ptr) { return 0; } diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h index 0dc0cf2863e2..7a805796fcfd 100644 --- a/include/linux/memory-tiers.h +++ b/include/linux/memory-tiers.h @@ -18,7 +18,7 @@ * adistance value (slightly faster) than default DRAM adistance to be part of * the same memory tier. 
*/ -#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1)) +#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1)) struct memory_tier; struct memory_dev_type { diff --git a/include/linux/memory.h b/include/linux/memory.h index 5ec4e6d209b9..40eb70ccb09d 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h @@ -109,8 +109,6 @@ struct memory_notify { unsigned long altmap_nr_pages; unsigned long start_pfn; unsigned long nr_pages; - int status_change_nid_normal; - int status_change_nid; }; struct notifier_block; @@ -179,12 +177,30 @@ struct memory_group *memory_group_find_by_id(int mgid); typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *); int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func, struct memory_group *excluded, void *arg); +struct memory_block *find_memory_block_by_id(unsigned long block_id); #define hotplug_memory_notifier(fn, pri) ({ \ static __meminitdata struct notifier_block fn##_mem_nb =\ { .notifier_call = fn, .priority = pri };\ register_memory_notifier(&fn##_mem_nb); \ }) +extern int sections_per_block; + +static inline unsigned long memory_block_id(unsigned long section_nr) +{ + return section_nr / sections_per_block; +} + +static inline unsigned long pfn_to_block_id(unsigned long pfn) +{ + return memory_block_id(pfn_to_section_nr(pfn)); +} + +static inline unsigned long phys_to_block_id(unsigned long phys) +{ + return pfn_to_block_id(PFN_DOWN(phys)); +} + #ifdef CONFIG_NUMA void memory_block_add_nid(struct memory_block *mem, int nid, enum meminit_context context); diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index eaac5ae8c05c..23f038a16231 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h @@ -314,7 +314,8 @@ extern int add_memory_driver_managed(int nid, u64 start, u64 size, mhp_t mhp_flags); extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages, - struct vmem_altmap *altmap, int migratetype); + struct vmem_altmap *altmap, int migratetype, + bool isolate_pageblock); extern void remove_pfn_range_from_zone(struct zone *zone, unsigned long start_pfn, unsigned long nr_pages); diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h index 556375b91316..9acd703dd5ca 100644 --- a/include/linux/mfd/davinci_voicecodec.h +++ b/include/linux/mfd/davinci_voicecodec.h @@ -10,11 +10,13 @@ #ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_ #define __LINUX_MFD_DAVINCI_VOICECODEC_H_ -#include <linux/kernel.h> -#include <linux/platform_device.h> +#include <linux/bits.h> #include <linux/mfd/core.h> -#include <linux/platform_data/edma.h> +#include <linux/types.h> +struct clk; +struct device; +struct platform_device; struct regmap; /* diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h index 32e3470708ed..7e84738cbb20 100644 --- a/include/linux/mfd/madera/pdata.h +++ b/include/linux/mfd/madera/pdata.h @@ -8,10 +8,11 @@ #ifndef MADERA_PDATA_H #define MADERA_PDATA_H -#include <linux/kernel.h> #include <linux/regulator/arizona-ldo1.h> #include <linux/regulator/arizona-micsupp.h> #include <linux/regulator/machine.h> +#include <linux/types.h> + #include <sound/madera-pdata.h> #define MADERA_MAX_MICBIAS 4 diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h deleted file mode 100644 index 42d2b0e4884e..000000000000 --- a/include/linux/mfd/pcf50633/core.h +++ /dev/null @@ -1,229 +0,0 @@ -/* 
SPDX-License-Identifier: GPL-2.0-or-later */ -/* - * core.h -- Core driver for NXP PCF50633 - * - * (C) 2006-2008 by Openmoko, Inc. - * All rights reserved. - */ - -#ifndef __LINUX_MFD_PCF50633_CORE_H -#define __LINUX_MFD_PCF50633_CORE_H - -#include <linux/i2c.h> -#include <linux/workqueue.h> -#include <linux/regulator/driver.h> -#include <linux/regulator/machine.h> -#include <linux/pm.h> -#include <linux/power_supply.h> - -struct pcf50633; -struct regmap; - -#define PCF50633_NUM_REGULATORS 11 - -struct pcf50633_platform_data { - struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS]; - - char **batteries; - int num_batteries; - - /* - * Should be set accordingly to the reference resistor used, see - * I_{ch(ref)} charger reference current in the pcf50633 User - * Manual. - */ - int charger_reference_current_ma; - - /* Callbacks */ - void (*probe_done)(struct pcf50633 *); - void (*mbc_event_callback)(struct pcf50633 *, int); - void (*regulator_registered)(struct pcf50633 *, int); - void (*force_shutdown)(struct pcf50633 *); - - u8 resumers[5]; -}; - -struct pcf50633_irq { - void (*handler) (int, void *); - void *data; -}; - -int pcf50633_register_irq(struct pcf50633 *pcf, int irq, - void (*handler) (int, void *), void *data); -int pcf50633_free_irq(struct pcf50633 *pcf, int irq); - -int pcf50633_irq_mask(struct pcf50633 *pcf, int irq); -int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq); -int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq); - -int pcf50633_read_block(struct pcf50633 *, u8 reg, - int nr_regs, u8 *data); -int pcf50633_write_block(struct pcf50633 *pcf, u8 reg, - int nr_regs, u8 *data); -u8 pcf50633_reg_read(struct pcf50633 *, u8 reg); -int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val); - -int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val); -int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits); - -/* Interrupt registers */ - -#define PCF50633_REG_INT1 0x02 -#define PCF50633_REG_INT2 0x03 -#define PCF50633_REG_INT3 0x04 -#define PCF50633_REG_INT4 0x05 -#define PCF50633_REG_INT5 0x06 - -#define PCF50633_REG_INT1M 0x07 -#define PCF50633_REG_INT2M 0x08 -#define PCF50633_REG_INT3M 0x09 -#define PCF50633_REG_INT4M 0x0a -#define PCF50633_REG_INT5M 0x0b - -enum { - /* Chip IRQs */ - PCF50633_IRQ_ADPINS, - PCF50633_IRQ_ADPREM, - PCF50633_IRQ_USBINS, - PCF50633_IRQ_USBREM, - PCF50633_IRQ_RESERVED1, - PCF50633_IRQ_RESERVED2, - PCF50633_IRQ_ALARM, - PCF50633_IRQ_SECOND, - PCF50633_IRQ_ONKEYR, - PCF50633_IRQ_ONKEYF, - PCF50633_IRQ_EXTON1R, - PCF50633_IRQ_EXTON1F, - PCF50633_IRQ_EXTON2R, - PCF50633_IRQ_EXTON2F, - PCF50633_IRQ_EXTON3R, - PCF50633_IRQ_EXTON3F, - PCF50633_IRQ_BATFULL, - PCF50633_IRQ_CHGHALT, - PCF50633_IRQ_THLIMON, - PCF50633_IRQ_THLIMOFF, - PCF50633_IRQ_USBLIMON, - PCF50633_IRQ_USBLIMOFF, - PCF50633_IRQ_ADCRDY, - PCF50633_IRQ_ONKEY1S, - PCF50633_IRQ_LOWSYS, - PCF50633_IRQ_LOWBAT, - PCF50633_IRQ_HIGHTMP, - PCF50633_IRQ_AUTOPWRFAIL, - PCF50633_IRQ_DWN1PWRFAIL, - PCF50633_IRQ_DWN2PWRFAIL, - PCF50633_IRQ_LEDPWRFAIL, - PCF50633_IRQ_LEDOVP, - PCF50633_IRQ_LDO1PWRFAIL, - PCF50633_IRQ_LDO2PWRFAIL, - PCF50633_IRQ_LDO3PWRFAIL, - PCF50633_IRQ_LDO4PWRFAIL, - PCF50633_IRQ_LDO5PWRFAIL, - PCF50633_IRQ_LDO6PWRFAIL, - PCF50633_IRQ_HCLDOPWRFAIL, - PCF50633_IRQ_HCLDOOVL, - - /* Always last */ - PCF50633_NUM_IRQ, -}; - -struct pcf50633 { - struct device *dev; - struct regmap *regmap; - - struct pcf50633_platform_data *pdata; - int irq; - struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ]; - struct work_struct irq_work; - 
struct workqueue_struct *work_queue; - struct mutex lock; - - u8 mask_regs[5]; - - u8 suspend_irq_masks[5]; - u8 resume_reason[5]; - int is_suspended; - - int onkey1s_held; - - struct platform_device *rtc_pdev; - struct platform_device *mbc_pdev; - struct platform_device *adc_pdev; - struct platform_device *input_pdev; - struct platform_device *bl_pdev; - struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS]; -}; - -enum pcf50633_reg_int1 { - PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */ - PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */ - PCF50633_INT1_USBINS = 0x04, /* USB inserted */ - PCF50633_INT1_USBREM = 0x08, /* USB removed */ - /* reserved */ - PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */ - PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */ -}; - -enum pcf50633_reg_int2 { - PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */ - PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */ - PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */ - PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */ - PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */ - PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */ - PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */ - PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */ -}; - -enum pcf50633_reg_int3 { - PCF50633_INT3_BATFULL = 0x01, /* Battery full */ - PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */ - PCF50633_INT3_THLIMON = 0x04, - PCF50633_INT3_THLIMOFF = 0x08, - PCF50633_INT3_USBLIMON = 0x10, - PCF50633_INT3_USBLIMOFF = 0x20, - PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */ - PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */ -}; - -enum pcf50633_reg_int4 { - PCF50633_INT4_LOWSYS = 0x01, - PCF50633_INT4_LOWBAT = 0x02, - PCF50633_INT4_HIGHTMP = 0x04, - PCF50633_INT4_AUTOPWRFAIL = 0x08, - PCF50633_INT4_DWN1PWRFAIL = 0x10, - PCF50633_INT4_DWN2PWRFAIL = 0x20, - PCF50633_INT4_LEDPWRFAIL = 0x40, - PCF50633_INT4_LEDOVP = 0x80, -}; - -enum pcf50633_reg_int5 { - PCF50633_INT5_LDO1PWRFAIL = 0x01, - PCF50633_INT5_LDO2PWRFAIL = 0x02, - PCF50633_INT5_LDO3PWRFAIL = 0x04, - PCF50633_INT5_LDO4PWRFAIL = 0x08, - PCF50633_INT5_LDO5PWRFAIL = 0x10, - PCF50633_INT5_LDO6PWRFAIL = 0x20, - PCF50633_INT5_HCLDOPWRFAIL = 0x40, - PCF50633_INT5_HCLDOOVL = 0x80, -}; - -/* misc. 
registers */ -#define PCF50633_REG_OOCSHDWN 0x0c - -/* LED registers */ -#define PCF50633_REG_LEDOUT 0x28 -#define PCF50633_REG_LEDENA 0x29 -#define PCF50633_REG_LEDCTL 0x2a -#define PCF50633_REG_LEDDIM 0x2b - -static inline struct pcf50633 *dev_to_pcf50633(struct device *dev) -{ - return dev_get_drvdata(dev); -} - -int pcf50633_irq_init(struct pcf50633 *pcf, int irq); -void pcf50633_irq_free(struct pcf50633 *pcf); -extern const struct dev_pm_ops pcf50633_pm; - -#endif diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h index 69cbea78b430..28170ee08898 100644 --- a/include/linux/mfd/rk808.h +++ b/include/linux/mfd/rk808.h @@ -812,6 +812,8 @@ enum rk806_pin_dr_sel { #define RK806_INT_POL_H BIT(1) #define RK806_INT_POL_L 0 +/* SYS_CFG3 */ +#define RK806_RST_FUN_MSK GENMASK(7, 6) #define RK806_SLAVE_RESTART_FUN_MSK BIT(1) #define RK806_SLAVE_RESTART_FUN_EN BIT(1) #define RK806_SLAVE_RESTART_FUN_OFF 0 diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h index e9e24f4c4578..9b9119c742a2 100644 --- a/include/linux/mfd/syscon/atmel-smc.h +++ b/include/linux/mfd/syscon/atmel-smc.h @@ -11,9 +11,11 @@ #ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_ #define _LINUX_MFD_SYSCON_ATMEL_SMC_H_ -#include <linux/kernel.h> -#include <linux/of.h> -#include <linux/regmap.h> +#include <linux/bits.h> +#include <linux/types.h> + +struct device_node; +struct regmap; #define ATMEL_SMC_SETUP(cs) (((cs) * 0x10)) #define ATMEL_HSMC_SETUP(layout, cs) \ diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h index 3e8d29189267..55234e771ba7 100644 --- a/include/linux/mfd/tps65219.h +++ b/include/linux/mfd/tps65219.h @@ -10,7 +10,6 @@ #define MFD_TPS65219_H #include <linux/bitops.h> -#include <linux/notifier.h> #include <linux/regmap.h> #include <linux/regulator/driver.h> @@ -438,17 +437,13 @@ enum tps65219_irqs { * * @dev: MFD device * @regmap: Regmap for accessing the device registers - * @chip_id: Chip ID * @irq_data: Regmap irq data used for the irq chip - * @nb: notifier block for the restart handler */ struct tps65219 { struct device *dev; struct regmap *regmap; - unsigned int chip_id; struct regmap_irq_chip_data *irq_data; - struct notifier_block nb; }; #endif /* MFD_TPS65219_H */ diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h index 85dc406173db..b31e07fa4d51 100644 --- a/include/linux/mfd/twl.h +++ b/include/linux/mfd/twl.h @@ -205,27 +205,6 @@ int twl_get_hfclk_rate(void); int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); int twl6030_interrupt_mask(u8 bit_mask, u8 offset); -/* Card detect Configuration for MMC1 Controller on OMAP4 */ -#ifdef CONFIG_TWL4030_CORE -int twl6030_mmc_card_detect_config(void); -#else -static inline int twl6030_mmc_card_detect_config(void) -{ - pr_debug("twl6030_mmc_card_detect_config not supported\n"); - return 0; -} -#endif - -/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */ -#ifdef CONFIG_TWL4030_CORE -int twl6030_mmc_card_detect(struct device *dev, int slot); -#else -static inline int twl6030_mmc_card_detect(struct device *dev, int slot) -{ - pr_debug("Call back twl6030_mmc_card_detect not supported\n"); - return -EIO; -} -#endif /*----------------------------------------------------------------------*/ /* diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index a3241e4d7548..5f70d3b5d1b1 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h @@ -8,11 +8,12 @@ #ifndef __LINUX_MFD_WM8350_CORE_H_ #define __LINUX_MFD_WM8350_CORE_H_ 
-#include <linux/kernel.h> -#include <linux/mutex.h> -#include <linux/interrupt.h> #include <linux/completion.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/mutex.h> #include <linux/regmap.h> +#include <linux/types.h> #include <linux/mfd/wm8350/audio.h> #include <linux/mfd/wm8350/gpio.h> @@ -21,6 +22,9 @@ #include <linux/mfd/wm8350/supply.h> #include <linux/mfd/wm8350/wdt.h> +struct device; +struct platform_device; + /* * Register values. */ diff --git a/include/linux/migrate.h b/include/linux/migrate.h index aaa2114498d6..acadd41e0b5c 100644 --- a/include/linux/migrate.h +++ b/include/linux/migrate.h @@ -35,8 +35,8 @@ struct migration_target_control; * @src page. The driver should copy the contents of the * @src page to the @dst page and set up the fields of @dst page. * Both pages are locked. - * If page migration is successful, the driver should call - * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS. + * If page migration is successful, the driver should + * return MIGRATEPAGE_SUCCESS. * If the driver cannot migrate the page at the moment, it can return * -EAGAIN. The VM interprets this as a temporary migration failure and * will retry it later. Any other error value is a permanent migration @@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free, unsigned long private, enum migrate_mode mode, int reason, unsigned int *ret_succeeded); struct folio *alloc_migration_target(struct folio *src, unsigned long private); -bool isolate_movable_page(struct page *page, isolate_mode_t mode); +bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode); bool isolate_folio_to_list(struct folio *folio, struct list_head *list); int migrate_huge_page_move_mapping(struct address_space *mapping, @@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new, static inline struct folio *alloc_migration_target(struct folio *src, unsigned long private) { return NULL; } -static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode) +static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode) { return false; } static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list) { return false; } @@ -103,44 +103,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping, #endif /* CONFIG_MIGRATION */ -#ifdef CONFIG_COMPACTION -bool PageMovable(struct page *page); -void __SetPageMovable(struct page *page, const struct movable_operations *ops); -void __ClearPageMovable(struct page *page); -#else -static inline bool PageMovable(struct page *page) { return false; } -static inline void __SetPageMovable(struct page *page, - const struct movable_operations *ops) -{ -} -static inline void __ClearPageMovable(struct page *page) -{ -} -#endif - -static inline bool folio_test_movable(struct folio *folio) -{ - return PageMovable(&folio->page); -} - -static inline -const struct movable_operations *folio_movable_ops(struct folio *folio) -{ - VM_BUG_ON(!__folio_test_movable(folio)); - - return (const struct movable_operations *) - ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE); -} - -static inline -const struct movable_operations *page_movable_ops(struct page *page) -{ - VM_BUG_ON(!__PageMovable(page)); - - return (const struct movable_operations *) - ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE); -} - #ifdef CONFIG_NUMA_BALANCING int migrate_misplaced_folio_prepare(struct folio *folio, struct vm_area_struct *vma, int 
node); diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index e6ba8f4f4bd1..8c5fbfb85749 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h @@ -36,6 +36,7 @@ #include <linux/kernel.h> #include <linux/completion.h> #include <linux/pci.h> +#include <linux/pci-tph.h> #include <linux/irq.h> #include <linux/spinlock_types.h> #include <linux/semaphore.h> @@ -688,6 +689,7 @@ struct mlx5_fw_tracer; struct mlx5_vxlan; struct mlx5_geneve; struct mlx5_hv_vhca; +struct mlx5_st; #define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity)) #define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev)) @@ -757,6 +759,7 @@ struct mlx5_core_dev { u32 issi; struct mlx5e_resources mlx5e_res; struct mlx5_dm *dm; + struct mlx5_st *st; struct mlx5_vxlan *vxlan; struct mlx5_geneve *geneve; struct { @@ -1160,6 +1163,23 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type, u64 length, u16 uid, phys_addr_t addr, u32 obj_id); +#ifdef CONFIG_PCIE_TPH +int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *st_index); +int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index); +#else +static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev, + enum tph_mem_type mem_type, + unsigned int cpu_uid, u16 *st_index) +{ + return -EOPNOTSUPP; +} +static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index) +{ + return -EOPNOTSUPP; +} +#endif + struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev); void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev); @@ -1349,4 +1369,9 @@ enum { }; bool mlx5_wc_support_get(struct mlx5_core_dev *mdev); + +static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev) +{ + return devlink_net(priv_to_devlink(dev)); +} #endif /* MLX5_DRIVER_H */ diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index ed4130e49c27..8360d9011d4f 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h @@ -1871,7 +1871,9 @@ struct mlx5_ifc_cmd_hca_cap_bits { u8 reserved_at_280[0x10]; u8 max_wqe_sz_sq[0x10]; - u8 reserved_at_2a0[0xb]; + u8 reserved_at_2a0[0x7]; + u8 mkey_pcie_tph[0x1]; + u8 reserved_at_2a8[0x3]; u8 shampo[0x1]; u8 reserved_at_2ac[0x4]; u8 max_wqe_sz_rq[0x10]; @@ -4418,6 +4420,10 @@ enum { MLX5_MKC_ACCESS_MODE_CROSSING = 0x6, }; +enum { + MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0, +}; + struct mlx5_ifc_mkc_bits { u8 reserved_at_0[0x1]; u8 free[0x1]; @@ -4469,7 +4475,11 @@ struct mlx5_ifc_mkc_bits { u8 relaxed_ordering_read[0x1]; u8 log_page_size[0x6]; - u8 reserved_at_1e0[0x20]; + u8 reserved_at_1e0[0x5]; + u8 pcie_tph_en[0x1]; + u8 pcie_tph_ph[0x2]; + u8 pcie_tph_steering_tag_index[0x8]; + u8 reserved_at_1f0[0x10]; }; struct mlx5_ifc_pkey_bits { diff --git a/include/linux/mm.h b/include/linux/mm.h index 0c44bb8ce544..1ae97a0b8ec7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -414,8 +414,10 @@ extern unsigned int kobjsize(const void *objp); #endif #ifdef CONFIG_64BIT -/* VM is sealed, in vm_flags */ -#define VM_SEALED _BITUL(63) +#define VM_SEALED_BIT 42 +#define VM_SEALED BIT(VM_SEALED_BIT) +#else +#define VM_SEALED VM_NONE #endif /* Bits set in the VMA until the stack is in its final location */ @@ -1818,7 +1820,24 @@ static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot) { return pmd_mkhuge(pfn_pmd(folio_pfn(folio), 
pgprot)); } -#endif + +#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD +/** + * folio_mk_pud - Create a PUD for this folio + * @folio: The folio to create a PUD for + * @pgprot: The page protection bits to use + * + * Create a page table entry for the first page of this folio. + * This is suitable for passing to set_pud_at(). + * + * Return: A page table entry suitable for mapping this folio. + */ +static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot) +{ + return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot)); +} +#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */ +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_MMU */ static inline bool folio_has_pincount(const struct folio *folio) @@ -2152,13 +2171,13 @@ static inline int folio_expected_ref_count(const struct folio *folio) const int order = folio_order(folio); int ref_count = 0; - if (WARN_ON_ONCE(folio_test_slab(folio))) + if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio))) return 0; if (folio_test_anon(folio)) { /* One reference per page from the swapcache. */ ref_count += folio_test_swapcache(folio) << order; - } else if (!((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS)) { + } else { /* One reference per page from the pagecache. */ ref_count += !!folio->mapping << order; /* One reference from PG_private. */ @@ -2549,7 +2568,7 @@ extern long change_protection(struct mmu_gather *tlb, unsigned long end, unsigned long cp_flags); extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb, struct vm_area_struct *vma, struct vm_area_struct **pprev, - unsigned long start, unsigned long end, unsigned long newflags); + unsigned long start, unsigned long end, vm_flags_t newflags); /* * doesn't attempt to fault and will return short. 
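As a usage illustration for the folio_mk_pud() helper added in this file above: a minimal, hypothetical sketch of a caller that installs a PUD-sized folio, assuming CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD. Only folio_mk_pud() and set_pud_at() come from the kernel; the wrapper function and its assumptions are invented for this example.

/* Hypothetical sketch, not part of the patch. */
static void example_map_pud_folio(struct mm_struct *mm, unsigned long addr,
				  pud_t *pudp, struct folio *folio,
				  pgprot_t pgprot)
{
	/* Build a huge entry from the folio's first page. */
	pud_t entry = folio_mk_pud(folio, pgprot);

	/* Install it; folio_mk_pud() is documented as suitable for set_pud_at(). */
	set_pud_at(mm, addr, pudp, entry);
}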
@@ -2694,13 +2713,6 @@ static inline pud_t pud_mkspecial(pud_t pud) } #endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */ -#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP -static inline int pte_devmap(pte_t pte) -{ - return 0; -} -#endif - extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, @@ -3313,9 +3325,9 @@ extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages); extern bool vma_is_special_mapping(const struct vm_area_struct *vma, const struct vm_special_mapping *sm); -extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, +struct vm_area_struct *_install_special_mapping(struct mm_struct *mm, unsigned long addr, unsigned long len, - unsigned long flags, + vm_flags_t vm_flags, const struct vm_special_mapping *spec); unsigned long randomize_stack_top(unsigned long stack_top); @@ -3479,10 +3491,10 @@ static inline bool range_in_vma(struct vm_area_struct *vma, } #ifdef CONFIG_MMU -pgprot_t vm_get_page_prot(unsigned long vm_flags); +pgprot_t vm_get_page_prot(vm_flags_t vm_flags); void vma_set_page_prot(struct vm_area_struct *vma); #else -static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) +static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags) { return __pgprot(0); } @@ -3519,9 +3531,9 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr, vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn, pgprot_t pgprot); vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr, - pfn_t pfn); + unsigned long pfn); vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma, - unsigned long addr, pfn_t pfn); + unsigned long addr, unsigned long pfn); int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma, @@ -4050,14 +4062,14 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping, #endif #ifdef CONFIG_ANON_VMA_NAME -int madvise_set_anon_name(struct mm_struct *mm, unsigned long start, - unsigned long len_in, - struct anon_vma_name *anon_name); +int set_anon_vma_name(unsigned long addr, unsigned long size, + const char __user *uname); #else -static inline int -madvise_set_anon_name(struct mm_struct *mm, unsigned long start, - unsigned long len_in, struct anon_vma_name *anon_name) { - return 0; +static inline +int set_anon_vma_name(unsigned long addr, unsigned long size, + const char __user *uname) +{ + return -EINVAL; } #endif @@ -4190,4 +4202,23 @@ static inline bool page_pool_page_is_pp(const struct page *page) } #endif +#define PAGE_SNAPSHOT_FAITHFUL (1 << 0) +#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1) +#define PAGE_SNAPSHOT_PG_IDLE (1 << 2) + +struct page_snapshot { + struct folio folio_snapshot; + struct page page_snapshot; + unsigned long pfn; + unsigned long idx; + unsigned long flags; +}; + +static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps) +{ + return ps->flags & PAGE_SNAPSHOT_FAITHFUL; +} + +void snapshot_page(struct page_snapshot *ps, const struct page *page); + #endif /* _LINUX_MM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 0f0662157066..08bc2442db93 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -105,7 +105,6 @@ struct page { unsigned int order; }; }; - /* See page-flags.h for PAGE_MAPPING_FLAGS */ struct address_space *mapping; union { pgoff_t __folio_index; /* 
Our offset within mapping. */ @@ -1086,7 +1085,7 @@ struct mm_struct { unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ unsigned long stack_vm; /* VM_STACK */ - unsigned long def_flags; + vm_flags_t def_flags; /** * @write_protect_seq: Locked when any thread is write diff --git a/include/linux/mman.h b/include/linux/mman.h index f4c6346a8fcd..de9e8e6229a4 100644 --- a/include/linux/mman.h +++ b/include/linux/mman.h @@ -137,7 +137,7 @@ static inline bool arch_validate_flags(unsigned long flags) /* * Combine the mmap "prot" argument into "vm_flags" used internally. */ -static inline unsigned long +static inline vm_flags_t calc_vm_prot_bits(unsigned long prot, unsigned long pkey) { return _calc_vm_trans(prot, PROT_READ, VM_READ ) | @@ -149,7 +149,7 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey) /* * Combine the mmap "flags" argument into "vm_flags" used internally. */ -static inline unsigned long +static inline vm_flags_t calc_vm_flag_bits(struct file *file, unsigned long flags) { return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) | diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h index 5da384bd0a26..11a078de9150 100644 --- a/include/linux/mmap_lock.h +++ b/include/linux/mmap_lock.h @@ -12,6 +12,7 @@ extern int rcuwait_wake_up(struct rcuwait *w); #include <linux/tracepoint-defs.h> #include <linux/types.h> #include <linux/cleanup.h> +#include <linux/sched/mm.h> #define MMAP_LOCK_INITIALIZER(name) \ .mmap_lock = __RWSEM_INITIALIZER((name).mmap_lock), @@ -154,6 +155,10 @@ static inline void vma_refcount_put(struct vm_area_struct *vma) * reused and attached to a different mm before we lock it. * Returns the vma on success, NULL on failure to lock and EAGAIN if vma got * detached. + * + * WARNING! The vma passed to this function cannot be used if the function + * fails to lock it because in certain cases the RCU lock is dropped and then + * reacquired. Once the RCU lock is dropped the vma can be concurrently freed. */ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, struct vm_area_struct *vma) @@ -183,6 +188,31 @@ static inline struct vm_area_struct *vma_start_read(struct mm_struct *mm, } rwsem_acquire_read(&vma->vmlock_dep_map, 0, 1, _RET_IP_); + + /* + * If vma got attached to another mm from under us, that mm is not + * stable and can be freed in the narrow window after vma->vm_refcnt + * is dropped and before rcuwait_wake_up(mm) is called. Grab it before + * releasing vma->vm_refcnt. + */ + if (unlikely(vma->vm_mm != mm)) { + /* Use a copy of vm_mm in case vma is freed after we drop vm_refcnt */ + struct mm_struct *other_mm = vma->vm_mm; + + /* + * __mmdrop() is a heavy operation and we don't need RCU + * protection here. Release RCU lock during these operations. + * We reinstate the RCU read lock as the caller expects it to + * be held when this function returns even on error. + */ + rcu_read_unlock(); + mmgrab(other_mm); + vma_refcount_put(vma); + mmdrop(other_mm); + rcu_read_lock(); + return NULL; + } + /* * Overflow of vm_lock_seq/mm_lock_seq might produce false locked result. * False unlocked result is impossible because we modify and check @@ -309,6 +339,17 @@ void vma_mark_detached(struct vm_area_struct *vma); struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm, unsigned long address); +/* + * Locks the next vma pointed to by the iterator.
Confirms the locked vma has not + * been modified and will retry under mmap_lock protection if modification + * was detected. Should be called from within an RCU read section. + * Returns either a valid locked VMA, NULL if no more VMAs or -EINTR if the + * process was interrupted. + */ +struct vm_area_struct *lock_next_vma(struct mm_struct *mm, + struct vma_iterator *iter, + unsigned long address); + #else /* CONFIG_PER_VMA_LOCK */ static inline void mm_lock_seqcount_init(struct mm_struct *mm) {} diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h index a0a3894900ed..14a45979cccc 100644 --- a/include/linux/mmdebug.h +++ b/include/linux/mmdebug.h @@ -89,6 +89,17 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); } \ unlikely(__ret_warn_once); \ }) +#define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \ + static bool __section(".data..once") __warned; \ + int __ret_warn_once = !!(cond); \ + \ + if (unlikely(__ret_warn_once && !__warned)) { \ + dump_vma(vma); \ + __warned = true; \ + WARN_ON(1); \ + } \ + unlikely(__ret_warn_once); \ +}) #define VM_WARN_ON_VMG(cond, vmg) ({ \ int __ret_warn = !!(cond); \ \ @@ -115,6 +126,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi); #define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond) +#define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond) #define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond) #define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond) diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 283913d42d7b..0c5da9141983 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -38,19 +38,19 @@ #define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1) /* Defines the order for the number of pages that have a migrate type. */ -#ifndef CONFIG_PAGE_BLOCK_ORDER -#define PAGE_BLOCK_ORDER MAX_PAGE_ORDER +#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER +#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER #else -#define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER -#endif /* CONFIG_PAGE_BLOCK_ORDER */ +#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER +#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */ /* * The MAX_PAGE_ORDER, which defines the max order of pages to be allocated - * by the buddy allocator, has to be larger or equal to the PAGE_BLOCK_ORDER, + * by the buddy allocator, has to be larger than or equal to the PAGE_BLOCK_MAX_ORDER, * which defines the order for the number of pages that can have a migrate type */ -#if (PAGE_BLOCK_ORDER > MAX_PAGE_ORDER) -#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_ORDER +#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER) +#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER #endif /* @@ -79,6 +79,9 @@ enum migratetype { * __free_pageblock_cma() function.
*/ MIGRATE_CMA, + __MIGRATE_TYPE_END = MIGRATE_CMA, +#else + __MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC, #endif #ifdef CONFIG_MEMORY_ISOLATION MIGRATE_ISOLATE, /* can't allocate from here */ @@ -92,8 +95,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES]; #ifdef CONFIG_CMA # define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA) # define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA) -# define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \ - get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK)) +/* + * __dump_folio() in mm/debug.c passes a folio pointer to on-stack struct folio, + * so folio_pfn() cannot be used and pfn is needed. + */ +# define is_migrate_cma_folio(folio, pfn) \ + (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA) #else # define is_migrate_cma(migratetype) false # define is_migrate_cma_page(_page) false @@ -122,14 +129,12 @@ static inline bool migratetype_is_mergeable(int mt) extern int page_group_by_mobility_disabled; -#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1) +#define get_pageblock_migratetype(page) \ + get_pfnblock_migratetype(page, page_to_pfn(page)) -#define get_pageblock_migratetype(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK) +#define folio_migratetype(folio) \ + get_pageblock_migratetype(&folio->page) -#define folio_migratetype(folio) \ - get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \ - MIGRATETYPE_MASK) struct free_area { struct list_head free_list[MIGRATE_TYPES]; unsigned long nr_free; @@ -201,7 +206,6 @@ enum node_stat_item { NR_FILE_PAGES, NR_FILE_DIRTY, NR_WRITEBACK, - NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */ NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */ NR_SHMEM_THPS, NR_SHMEM_PMDMAPPED, diff --git a/include/linux/module.h b/include/linux/module.h index a7cac01d95e7..3319a5269d28 100644 --- a/include/linux/module.h +++ b/include/linux/module.h @@ -33,7 +33,7 @@ #include <linux/percpu.h> #include <asm/module.h> -#define MODULE_NAME_LEN MAX_PARAM_PREFIX_LEN +#define MODULE_NAME_LEN __MODULE_NAME_LEN struct modversion_info { unsigned long crc; @@ -165,9 +165,6 @@ extern void cleanup_module(void); struct module_kobject *lookup_or_create_module_kobject(const char *name); -/* Generic info of form tag = "info" */ -#define MODULE_INFO(tag, info) __MODULE_INFO(tag, tag, info) - /* For userspace: you can also call me... */ #define MODULE_ALIAS(_alias) MODULE_INFO(alias, _alias) @@ -303,23 +300,6 @@ static typeof(name) __mod_device_table__##type##__##name \ struct notifier_block; -#ifdef CONFIG_MODULES - -/* Get/put a kernel symbol (calls must be symmetric) */ -void *__symbol_get(const char *symbol); -void *__symbol_get_gpl(const char *symbol); -#define symbol_get(x) ({ \ - static const char __notrim[] \ - __used __section(".no_trim_symbol") = __stringify(x); \ - (typeof(&x))(__symbol_get(__stringify(x))); }) - -/* modules using other modules: kdb wants to see this. */ -struct module_use { - struct list_head source_list; - struct list_head target_list; - struct module *source, *target; -}; - enum module_state { MODULE_STATE_LIVE, /* Normal state. */ MODULE_STATE_COMING, /* Full formed, running module_init. 
*/ @@ -604,6 +584,16 @@ struct module { #define MODULE_ARCH_INIT {} #endif +#ifdef CONFIG_MODULES + +/* Get/put a kernel symbol (calls must be symmetric) */ +void *__symbol_get(const char *symbol); +void *__symbol_get_gpl(const char *symbol); +#define symbol_get(x) ({ \ + static const char __notrim[] \ + __used __section(".no_trim_symbol") = __stringify(x); \ + (typeof(&x))(__symbol_get(__stringify(x))); }) + #ifndef HAVE_ARCH_KALLSYMS_SYMBOL_VALUE static inline unsigned long kallsyms_symbol_value(const Elf_Sym *sym) { diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h index bfb85fd13e1f..3a25122d83e2 100644 --- a/include/linux/moduleparam.h +++ b/include/linux/moduleparam.h @@ -6,6 +6,13 @@ #include <linux/stringify.h> #include <linux/kernel.h> +/* + * The maximum module name length, including the NUL byte. + * Chosen so that structs with an unsigned long line up, specifically + * modversion_info. + */ +#define __MODULE_NAME_LEN (64 - sizeof(unsigned long)) + /* You can override this manually, but generally this should match the module name. */ #ifdef MODULE @@ -17,21 +24,19 @@ #define __MODULE_INFO_PREFIX KBUILD_MODNAME "." #endif -/* Chosen so that structs with an unsigned long line up. */ -#define MAX_PARAM_PREFIX_LEN (64 - sizeof(unsigned long)) - -#define __MODULE_INFO(tag, name, info) \ - static const char __UNIQUE_ID(name)[] \ +/* Generic info of form tag = "info" */ +#define MODULE_INFO(tag, info) \ + static const char __UNIQUE_ID(modinfo)[] \ __used __section(".modinfo") __aligned(1) \ = __MODULE_INFO_PREFIX __stringify(tag) "=" info #define __MODULE_PARM_TYPE(name, _type) \ - __MODULE_INFO(parmtype, name##type, #name ":" _type) + MODULE_INFO(parmtype, #name ":" _type) /* One for each parameter, describing how to use it. Some files do multiple of these per line, so can't just use MODULE_INFO. */ #define MODULE_PARM_DESC(_parm, desc) \ - __MODULE_INFO(parm, _parm, #_parm ":" desc) + MODULE_INFO(parm, #_parm ":" desc) struct kernel_param; @@ -282,10 +287,9 @@ struct kparam_array #define __moduleparam_const const #endif -/* This is the fundamental function for registering boot/module - parameters. */ +/* This is the fundamental function for registering boot/module parameters. */ #define __module_param_call(prefix, name, ops, arg, perm, level, flags) \ - /* Default value instead of permissions? */ \ + static_assert(sizeof(""prefix) - 1 <= __MODULE_NAME_LEN); \ static const char __param_str_##name[] = prefix #name; \ static struct kernel_param __moduleparam_const __param_##name \ __used __section("__param") \ diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 1b56796f6cb3..288ef765a44e 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h @@ -8,15 +8,15 @@ #ifndef __LINUX_MTD_MAP_H__ #define __LINUX_MTD_MAP_H__ -#include <linux/types.h> -#include <linux/list.h> -#include <linux/string.h> #include <linux/bug.h> -#include <linux/kernel.h> #include <linux/io.h> - +#include <linux/ioport.h> +#include <linux/string.h> +#include <linux/types.h> #include <linux/unaligned.h> -#include <asm/barrier.h> + +struct device_node; +struct module; #ifdef CONFIG_MTD_MAP_BANK_WIDTH_1 #define map_bankwidth(map) 1 @@ -188,6 +188,7 @@ typedef union { of living. 
*/ +struct mtd_chip_driver; struct map_info { const char *name; unsigned long size; diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h index 15eaa09da998..27a45bdab7ec 100644 --- a/include/linux/mtd/spinand.h +++ b/include/linux/mtd/spinand.h @@ -62,30 +62,33 @@ SPI_MEM_OP_NO_DUMMY, \ SPI_MEM_OP_NO_DATA) -#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, ...) \ +#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ SPI_MEM_OP_DATA_IN(len, buf, 1), \ - SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0)) + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \ - SPI_MEM_OP_ADDR(2, addr, 1), \ - SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 1)) + SPI_MEM_OP_ADDR(2, addr, 1), \ + SPI_MEM_OP_DUMMY(ndummy, 1), \ + SPI_MEM_OP_DATA_IN(len, buf, 1), \ + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 1)) + SPI_MEM_OP_DATA_IN(len, buf, 1), \ + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 1)) + SPI_MEM_OP_DATA_IN(len, buf, 1), \ + SPI_MEM_OP_MAX_FREQ(freq)) #define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \ @@ -94,17 +97,19 @@ SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \ SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 2)) + SPI_MEM_OP_DATA_IN(len, buf, 2), \ + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 2)) + SPI_MEM_OP_DATA_IN(len, buf, 2), \ + SPI_MEM_OP_MAX_FREQ(freq)) #define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \ @@ -113,18 +118,19 @@ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \ SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, ...) 
\ +#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(2, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ SPI_MEM_OP_DATA_IN(len, buf, 2), \ - SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0)) + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \ SPI_MEM_OP_ADDR(3, addr, 2), \ SPI_MEM_OP_DUMMY(ndummy, 2), \ - SPI_MEM_OP_DATA_IN(len, buf, 2)) + SPI_MEM_OP_DATA_IN(len, buf, 2), \ + SPI_MEM_OP_MAX_FREQ(freq)) #define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \ @@ -133,17 +139,19 @@ SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \ SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(2, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 4)) + SPI_MEM_OP_DATA_IN(len, buf, 4), \ + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \ SPI_MEM_OP_ADDR(3, addr, 1), \ SPI_MEM_OP_DUMMY(ndummy, 1), \ - SPI_MEM_OP_DATA_IN(len, buf, 4)) + SPI_MEM_OP_DATA_IN(len, buf, 4), \ + SPI_MEM_OP_MAX_FREQ(freq)) #define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \ @@ -152,18 +160,19 @@ SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \ SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, ...) \ +#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(2, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ SPI_MEM_OP_DATA_IN(len, buf, 4), \ - SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0)) + SPI_MEM_OP_MAX_FREQ(freq)) -#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len) \ +#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \ SPI_MEM_OP_ADDR(3, addr, 4), \ SPI_MEM_OP_DUMMY(ndummy, 4), \ - SPI_MEM_OP_DATA_IN(len, buf, 4)) + SPI_MEM_OP_DATA_IN(len, buf, 4), \ + SPI_MEM_OP_MAX_FREQ(freq)) #define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \ SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \ @@ -484,6 +493,7 @@ struct spinand_user_otp { * @op_variants.update_cache: variants of the update-cache operation * @select_target: function used to select a target/die. Required only for * multi-die chips + * @configure_chip: Align the chip configuration with the core settings * @set_cont_read: enable/disable continuous cached reads * @fact_otp: SPI NAND factory OTP info. * @user_otp: SPI NAND user OTP info. 
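With the variadic __VA_ARGS__ + 0 trick removed, every user of these read-from-cache macros now spells out the maximum frequency argument. A hedged sketch of what a chip's op-variant table might look like after this change; the variant list below is invented for illustration and not taken from any real driver, and 0 preserves the old "no maximum frequency" behaviour.

/* Hypothetical chip table fragment, assuming args (addr, ndummy, buf, len, freq). */
static SPINAND_OP_VARIANTS(example_read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(0, 1, NULL, 0, 0),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));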
@@ -507,6 +517,7 @@ struct spinand_info { } op_variants; int (*select_target)(struct spinand_device *spinand, unsigned int target); + int (*configure_chip)(struct spinand_device *spinand); int (*set_cont_read)(struct spinand_device *spinand, bool enable); struct spinand_fact_otp fact_otp; @@ -539,6 +550,9 @@ struct spinand_info { #define SPINAND_SELECT_TARGET(__func) \ .select_target = __func +#define SPINAND_CONFIGURE_CHIP(__configure_chip) \ + .configure_chip = __configure_chip + #define SPINAND_CONT_READ(__set_cont_read) \ .set_cont_read = __set_cont_read @@ -607,6 +621,7 @@ struct spinand_dirmap { * passed in spi_mem_op be DMA-able, so we can't based the bufs on * the stack * @manufacturer: SPI NAND manufacturer information + * @configure_chip: Align the chip configuration with the core settings * @cont_read_possible: Field filled by the core once the whole system * configuration is known to tell whether continuous reads are * suitable to use or not in general with this chip/configuration. @@ -647,6 +662,7 @@ struct spinand_device { const struct spinand_manufacturer *manufacturer; void *priv; + int (*configure_chip)(struct spinand_device *spinand); bool cont_read_possible; int (*set_cont_read)(struct spinand_device *spinand, bool enable); @@ -723,7 +739,9 @@ int spinand_match_and_init(struct spinand_device *spinand, enum spinand_readid_method rdid_method); int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val); +int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val); int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val); +int spinand_write_enable_op(struct spinand_device *spinand); int spinand_select_target(struct spinand_device *spinand, unsigned int target); int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us, diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index 562f92504f2b..c3f79c4be1cc 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h @@ -250,7 +250,6 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum); int ubi_leb_map(struct ubi_volume_desc *desc, int lnum); int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum); int ubi_sync(int ubi_num); -int ubi_flush(int ubi_num, int vol_id, int lnum); /* * This function is the same as the 'ubi_leb_read()' function, but it does not diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 00afd341d293..847b81ca6436 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h @@ -227,7 +227,7 @@ extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); DEFINE_GUARD(mutex, struct mutex *, mutex_lock(_T), mutex_unlock(_T)) DEFINE_GUARD_COND(mutex, _try, mutex_trylock(_T)) -DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T) == 0) +DEFINE_GUARD_COND(mutex, _intr, mutex_lock_interruptible(_T), _RET == 0) extern unsigned long mutex_get_owner(struct mutex *lock); diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 67ae2c3f41d2..c585939b6cd6 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h @@ -161,6 +161,12 @@ struct nfs_inode { unsigned long cache_validity; /* bit mask */ /* + * NFS Attributes not included in struct inode + */ + + struct timespec64 btime; + + /* * read_cache_jiffies is when we started read-caching this inode. * attrtimeo is for how long the cached information is assumed * to be valid. 
A successful attribute revalidation doubles @@ -316,10 +322,12 @@ struct nfs4_copy_state { #define NFS_INO_INVALID_XATTR BIT(15) /* xattrs are invalid */ #define NFS_INO_INVALID_NLINK BIT(16) /* cached nlinks is invalid */ #define NFS_INO_INVALID_MODE BIT(17) /* cached mode is invalid */ +#define NFS_INO_INVALID_BTIME BIT(18) /* cached btime is invalid */ #define NFS_INO_INVALID_ATTR (NFS_INO_INVALID_CHANGE \ | NFS_INO_INVALID_CTIME \ | NFS_INO_INVALID_MTIME \ + | NFS_INO_INVALID_BTIME \ | NFS_INO_INVALID_SIZE \ | NFS_INO_INVALID_NLINK \ | NFS_INO_INVALID_MODE \ diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 63141320c2a8..d30c0245031c 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -172,12 +172,11 @@ struct nfs_server { #define NFS_MOUNT_FORCE_RDIRPLUS 0x20000000 #define NFS_MOUNT_NETUNREACH_FATAL 0x40000000 - unsigned int fattr_valid; /* Valid attributes */ unsigned int caps; /* server capabilities */ + __u64 fattr_valid; /* Valid attributes */ unsigned int rsize; /* read size */ unsigned int rpages; /* read size (in pages) */ unsigned int wsize; /* write size */ - unsigned int wpages; /* write size (in pages) */ unsigned int wtmult; /* server disk block size */ unsigned int dtsize; /* readdir size */ unsigned short port; /* "port=" setting */ @@ -203,7 +202,6 @@ struct nfs_server { struct nfs_fsid fsid; int s_sysfs_id; /* sysfs dentry index */ __u64 maxfilesize; /* maximum file size */ - struct timespec64 time_delta; /* smallest time granularity */ unsigned long mount_time; /* when this fs was mounted */ struct super_block *super; /* VFS super block */ dev_t s_dev; /* superblock dev numbers */ @@ -248,7 +246,6 @@ struct nfs_server { filesystem */ struct pnfs_layoutdriver_type *pnfs_curr_ld; /* Active layout driver */ struct rpc_wait_queue roc_rpcwaitq; - void *pnfs_ld_data; /* per mount point data */ /* the following fields are protected by nfs_client->cl_lock */ struct rb_root state_owners; @@ -257,6 +254,9 @@ struct nfs_server { struct list_head state_owners_lru; struct list_head layouts; struct list_head delegations; + atomic_long_t nr_active_delegations; + unsigned int delegation_hash_mask; + struct hlist_head *delegation_hash_table; struct list_head ss_copies; struct list_head ss_src_copies; diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 67f6632f723b..ac4bff6e9913 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -45,7 +45,7 @@ struct nfs4_threshold { }; struct nfs_fattr { - unsigned int valid; /* which fields are valid */ + __u64 valid; /* which fields are valid */ umode_t mode; __u32 nlink; kuid_t uid; @@ -67,6 +67,7 @@ struct nfs_fattr { struct timespec64 atime; struct timespec64 mtime; struct timespec64 ctime; + struct timespec64 btime; __u64 change_attr; /* NFSv4 change attribute */ __u64 pre_change_attr;/* pre-op NFSv4 change attribute */ __u64 pre_size; /* pre_op_attr.size */ @@ -80,32 +81,33 @@ struct nfs_fattr { struct nfs4_label *label; }; -#define NFS_ATTR_FATTR_TYPE (1U << 0) -#define NFS_ATTR_FATTR_MODE (1U << 1) -#define NFS_ATTR_FATTR_NLINK (1U << 2) -#define NFS_ATTR_FATTR_OWNER (1U << 3) -#define NFS_ATTR_FATTR_GROUP (1U << 4) -#define NFS_ATTR_FATTR_RDEV (1U << 5) -#define NFS_ATTR_FATTR_SIZE (1U << 6) -#define NFS_ATTR_FATTR_PRESIZE (1U << 7) -#define NFS_ATTR_FATTR_BLOCKS_USED (1U << 8) -#define NFS_ATTR_FATTR_SPACE_USED (1U << 9) -#define NFS_ATTR_FATTR_FSID (1U << 10) -#define NFS_ATTR_FATTR_FILEID (1U << 11) -#define NFS_ATTR_FATTR_ATIME (1U << 12) -#define 
NFS_ATTR_FATTR_MTIME (1U << 13) -#define NFS_ATTR_FATTR_CTIME (1U << 14) -#define NFS_ATTR_FATTR_PREMTIME (1U << 15) -#define NFS_ATTR_FATTR_PRECTIME (1U << 16) -#define NFS_ATTR_FATTR_CHANGE (1U << 17) -#define NFS_ATTR_FATTR_PRECHANGE (1U << 18) -#define NFS_ATTR_FATTR_V4_LOCATIONS (1U << 19) -#define NFS_ATTR_FATTR_V4_REFERRAL (1U << 20) -#define NFS_ATTR_FATTR_MOUNTPOINT (1U << 21) -#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID (1U << 22) -#define NFS_ATTR_FATTR_OWNER_NAME (1U << 23) -#define NFS_ATTR_FATTR_GROUP_NAME (1U << 24) -#define NFS_ATTR_FATTR_V4_SECURITY_LABEL (1U << 25) +#define NFS_ATTR_FATTR_TYPE BIT_ULL(0) +#define NFS_ATTR_FATTR_MODE BIT_ULL(1) +#define NFS_ATTR_FATTR_NLINK BIT_ULL(2) +#define NFS_ATTR_FATTR_OWNER BIT_ULL(3) +#define NFS_ATTR_FATTR_GROUP BIT_ULL(4) +#define NFS_ATTR_FATTR_RDEV BIT_ULL(5) +#define NFS_ATTR_FATTR_SIZE BIT_ULL(6) +#define NFS_ATTR_FATTR_PRESIZE BIT_ULL(7) +#define NFS_ATTR_FATTR_BLOCKS_USED BIT_ULL(8) +#define NFS_ATTR_FATTR_SPACE_USED BIT_ULL(9) +#define NFS_ATTR_FATTR_FSID BIT_ULL(10) +#define NFS_ATTR_FATTR_FILEID BIT_ULL(11) +#define NFS_ATTR_FATTR_ATIME BIT_ULL(12) +#define NFS_ATTR_FATTR_MTIME BIT_ULL(13) +#define NFS_ATTR_FATTR_CTIME BIT_ULL(14) +#define NFS_ATTR_FATTR_PREMTIME BIT_ULL(15) +#define NFS_ATTR_FATTR_PRECTIME BIT_ULL(16) +#define NFS_ATTR_FATTR_CHANGE BIT_ULL(17) +#define NFS_ATTR_FATTR_PRECHANGE BIT_ULL(18) +#define NFS_ATTR_FATTR_V4_LOCATIONS BIT_ULL(19) +#define NFS_ATTR_FATTR_V4_REFERRAL BIT_ULL(20) +#define NFS_ATTR_FATTR_MOUNTPOINT BIT_ULL(21) +#define NFS_ATTR_FATTR_MOUNTED_ON_FILEID BIT_ULL(22) +#define NFS_ATTR_FATTR_OWNER_NAME BIT_ULL(23) +#define NFS_ATTR_FATTR_GROUP_NAME BIT_ULL(24) +#define NFS_ATTR_FATTR_V4_SECURITY_LABEL BIT_ULL(25) +#define NFS_ATTR_FATTR_BTIME BIT_ULL(26) #define NFS_ATTR_FATTR (NFS_ATTR_FATTR_TYPE \ | NFS_ATTR_FATTR_MODE \ @@ -126,6 +128,7 @@ struct nfs_fattr { | NFS_ATTR_FATTR_SPACE_USED) #define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ | NFS_ATTR_FATTR_SPACE_USED \ + | NFS_ATTR_FATTR_BTIME \ | NFS_ATTR_FATTR_V4_SECURITY_LABEL) /* diff --git a/include/linux/node.h b/include/linux/node.h index 2b7517892230..2c7529335b21 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -111,43 +111,64 @@ struct memory_block; extern struct node *node_devices[]; #if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA) -void register_memory_blocks_under_node(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context); +void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn, + unsigned long end_pfn); #else -static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn, - unsigned long end_pfn, - enum meminit_context context) +static inline void register_memory_blocks_under_node_hotplug(int nid, + unsigned long start_pfn, + unsigned long end_pfn) +{ +} +static inline void register_memory_blocks_under_nodes(void) { } #endif extern void unregister_node(struct node *node); -#ifdef CONFIG_NUMA -extern void node_dev_init(void); -/* Core of the node registration - only memory hotplug should use this */ -extern int __register_one_node(int nid); - -/* Registers an online node */ -static inline int register_one_node(int nid) -{ - int error = 0; - if (node_online(nid)) { - struct pglist_data *pgdat = NODE_DATA(nid); - unsigned long start_pfn = pgdat->node_start_pfn; - unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; +struct node_notify { + int nid; +}; - error = __register_one_node(nid); - if (error) - return error; - 
register_memory_blocks_under_node(nid, start_pfn, end_pfn, - MEMINIT_EARLY); - } +#define NODE_ADDING_FIRST_MEMORY (1<<0) +#define NODE_ADDED_FIRST_MEMORY (1<<1) +#define NODE_CANCEL_ADDING_FIRST_MEMORY (1<<2) +#define NODE_REMOVING_LAST_MEMORY (1<<3) +#define NODE_REMOVED_LAST_MEMORY (1<<4) +#define NODE_CANCEL_REMOVING_LAST_MEMORY (1<<5) - return error; +#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA) +extern int register_node_notifier(struct notifier_block *nb); +extern void unregister_node_notifier(struct notifier_block *nb); +extern int node_notify(unsigned long val, void *v); + +#define hotplug_node_notifier(fn, pri) ({ \ + static __meminitdata struct notifier_block fn##_node_nb =\ + { .notifier_call = fn, .priority = pri };\ + register_node_notifier(&fn##_node_nb); \ +}) +#else +static inline int register_node_notifier(struct notifier_block *nb) +{ + return 0; +} +static inline void unregister_node_notifier(struct notifier_block *nb) +{ +} +static inline int node_notify(unsigned long val, void *v) +{ + return 0; +} +static inline int hotplug_node_notifier(notifier_fn_t fn, int pri) +{ + return 0; } +#endif +#ifdef CONFIG_NUMA +extern void node_dev_init(void); +/* Core of the node registration - only memory hotplug should use this */ +extern int register_one_node(int nid); extern void unregister_one_node(int nid); extern int register_cpu_under_node(unsigned int cpu, unsigned int nid); extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid); @@ -160,10 +181,6 @@ extern int register_memory_node_under_compute_node(unsigned int mem_nid, static inline void node_dev_init(void) { } -static inline int __register_one_node(int nid) -{ - return 0; -} static inline int register_one_node(int nid) { return 0; diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index f08ae71585fa..7ad1f5c7407e 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h @@ -492,21 +492,9 @@ static __always_inline int num_node_state(enum node_states state) static __always_inline int node_random(const nodemask_t *maskp) { #if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1) - int w, bit; - - w = nodes_weight(*maskp); - switch (w) { - case 0: - bit = NUMA_NO_NODE; - break; - case 1: - bit = first_node(*maskp); - break; - default: - bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w)); - break; - } - return bit; + int node = find_random_bit(maskp->bits, MAX_NUMNODES); + + return node < MAX_NUMNODES ? node : NUMA_NO_NODE; #else return 0; #endif diff --git a/include/linux/padata.h b/include/linux/padata.h index 0146daf34430..765f2778e264 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h @@ -90,8 +90,6 @@ struct padata_cpumask { * @processed: Number of already processed objects. * @cpu: Next CPU to be processed. * @cpumask: The cpumasks in use for parallel and serial workers. - * @reorder_work: work struct for reordering. - * @lock: Reorder lock. */ struct parallel_data { struct padata_shell *ps; @@ -102,8 +100,6 @@ struct parallel_data { unsigned int processed; int cpu; struct padata_cpumask cpumask; - struct work_struct reorder_work; - spinlock_t ____cacheline_aligned lock; }; /** diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 4fe5ee67535b..8d3fa3a91ce4 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h @@ -167,8 +167,12 @@ enum pageflags { /* Remapped by swiotlb-xen. 
*/ PG_xen_remapped = PG_owner_priv_1, - /* non-lru isolated movable page */ - PG_isolated = PG_reclaim, +#ifdef CONFIG_MIGRATION + /* movable_ops page that is isolated for migration */ + PG_movable_ops_isolated = PG_reclaim, + /* this is a movable_ops page (for selected typed pages only) */ + PG_movable_ops = PG_uptodate, +#endif /* Only valid for buddy pages. Used to track pages that are reported */ PG_reported = PG_uptodate, @@ -691,15 +695,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) /* * On an anonymous folio mapped into a user virtual memory area, * folio->mapping points to its anon_vma, not to a struct address_space; - * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h. + * with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h. * - * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled, - * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON + * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled, + * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON * bit; and then folio->mapping points, not to an anon_vma, but to a private - * structure which KSM associates with that merged page. See ksm.h. - * - * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable - * page and then folio->mapping points to a struct movable_operations. + * structure which KSM associates with that merged folio. See ksm.h. * * Please note that, confusingly, "folio_mapping" refers to the inode * address_space which maps the folio from disk; whereas "folio_mapped" @@ -712,50 +713,27 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted) * false before calling the following functions (e.g., folio_test_anon). * See mm/slab.h. */ -#define PAGE_MAPPING_ANON 0x1 -#define PAGE_MAPPING_MOVABLE 0x2 -#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) -#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE) - -static __always_inline bool folio_mapping_flags(const struct folio *folio) -{ - return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0; -} - -static __always_inline bool PageMappingFlags(const struct page *page) -{ - return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0; -} +#define FOLIO_MAPPING_ANON 0x1 +#define FOLIO_MAPPING_ANON_KSM 0x2 +#define FOLIO_MAPPING_KSM (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM) +#define FOLIO_MAPPING_FLAGS (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM) static __always_inline bool folio_test_anon(const struct folio *folio) { - return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0; + return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0; } static __always_inline bool PageAnonNotKsm(const struct page *page) { unsigned long flags = (unsigned long)page_folio(page)->mapping; - return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON; + return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON; } static __always_inline bool PageAnon(const struct page *page) { return folio_test_anon(page_folio(page)); } - -static __always_inline bool __folio_test_movable(const struct folio *folio) -{ - return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == - PAGE_MAPPING_MOVABLE; -} - -static __always_inline bool __PageMovable(const struct page *page) -{ - return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) == - PAGE_MAPPING_MOVABLE; -} - #ifdef CONFIG_KSM /* * A KSM page is one of those write-protected "shared pages" or "merged pages" @@ -765,8 +743,8 @@ static __always_inline 
bool __PageMovable(const struct page *page) */ static __always_inline bool folio_test_ksm(const struct folio *folio) { - return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) == - PAGE_MAPPING_KSM; + return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) == + FOLIO_MAPPING_KSM; } #else FOLIO_TEST_FLAG_FALSE(ksm) @@ -859,8 +837,6 @@ void set_page_writeback(struct page *page); #define folio_start_writeback(folio) \ __folio_start_writeback(folio, false) -#define folio_start_writeback_keepwrite(folio) \ - __folio_start_writeback(folio, true) static __always_inline bool folio_test_head(const struct folio *folio) { @@ -1137,7 +1113,53 @@ static inline bool folio_contain_hwpoisoned_page(struct folio *folio) bool is_free_buddy_page(const struct page *page); -PAGEFLAG(Isolated, isolated, PF_ANY); +#ifdef CONFIG_MIGRATION +/* + * This page is migratable through movable_ops (for selected typed pages + * only). + * + * Page migration of such pages might fail, for example, if the page is + * already isolated by somebody else, or if the page is about to get freed. + * + * While a subsystem might set selected typed pages that support page migration + * as being movable through movable_ops, it must never clear this flag. + * + * This flag is only cleared when the page is freed back to the buddy. + * + * Only selected page types support this flag (see page_movable_ops()) and + * the flag might be used in other contexts for other pages. Always use + * page_has_movable_ops() instead. + */ +TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL); +SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL); +/* + * A movable_ops page has this flag set while it is isolated for migration. + * This flag primarily protects against concurrent migration attempts. + * + * Once migration has ended (success or failure), the flag is cleared. The + * flag is managed by the migration core. + */ +PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL); +#else /* !CONFIG_MIGRATION */ +TESTPAGEFLAG_FALSE(MovableOps, movable_ops); +SETPAGEFLAG_NOOP(MovableOps, movable_ops); +PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated); +#endif /* CONFIG_MIGRATION */ + +/** + * page_has_movable_ops - test for a movable_ops page + * @page: The page to test. + * + * Test whether this is a movable_ops page. Such pages will stay that + * way until freed. + * + * Returns true if this is a movable_ops page, otherwise false.
+ */ +static inline bool page_has_movable_ops(const struct page *page) +{ + return PageMovableOps(page) && + (PageOffline(page) || PageZsmalloc(page)); +} static __always_inline int PageAnonExclusive(const struct page *page) { diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 898bb788243b..3e2f960e166c 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h @@ -11,6 +11,12 @@ static inline bool is_migrate_isolate(int migratetype) { return migratetype == MIGRATE_ISOLATE; } +#define get_pageblock_isolate(page) \ + get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate) +#define clear_pageblock_isolate(page) \ + clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate) +#define set_pageblock_isolate(page) \ + set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate) #else static inline bool is_migrate_isolate_page(struct page *page) { @@ -20,22 +26,45 @@ static inline bool is_migrate_isolate(int migratetype) { return false; } +static inline bool get_pageblock_isolate(struct page *page) +{ + return false; +} +static inline void clear_pageblock_isolate(struct page *page) +{ +} +static inline void set_pageblock_isolate(struct page *page) +{ +} #endif -#define MEMORY_OFFLINE 0x1 -#define REPORT_FAILURE 0x2 +/* + * Pageblock isolation modes: + * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory + * e.g., skip over PageHWPoison() pages and + * PageOffline() pages. Unmovable pages will be + * reported in this mode. + * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations + * PB_ISOLATE_MODE_OTHER - isolate for other purposes + */ +enum pb_isolate_mode { + PB_ISOLATE_MODE_MEM_OFFLINE, + PB_ISOLATE_MODE_CMA_ALLOC, + PB_ISOLATE_MODE_OTHER, +}; -void set_pageblock_migratetype(struct page *page, int migratetype); +void __meminit init_pageblock_migratetype(struct page *page, + enum migratetype migratetype, + bool isolate); -bool move_freepages_block_isolate(struct zone *zone, struct page *page, - int migratetype); +bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page); +bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page); int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype, int flags); + enum pb_isolate_mode mode); -void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, - int migratetype); +void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn); int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn, - int isol_flags); + enum pb_isolate_mode mode); #endif diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h index debdc25f08b9..3328357f6dba 100644 --- a/include/linux/page_owner.h +++ b/include/linux/page_owner.h @@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page, extern void __split_page_owner(struct page *page, int old_order, int new_order); extern void __folio_copy_owner(struct folio *newfolio, struct folio *old); -extern void __set_page_owner_migrate_reason(struct page *page, int reason); +extern void __folio_set_owner_migrate_reason(struct folio *folio, int reason); extern void __dump_page_owner(const struct page *page); extern void pagetypeinfo_showmixedcount_print(struct seq_file *m, pg_data_t *pgdat, struct zone *zone); @@ -43,10 +43,10 @@ static inline void folio_copy_owner(struct folio *newfolio, struct folio *old) if (static_branch_unlikely(&page_owner_inited)) __folio_copy_owner(newfolio, old); } -static inline 
void set_page_owner_migrate_reason(struct page *page, int reason) +static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason) { if (static_branch_unlikely(&page_owner_inited)) - __set_page_owner_migrate_reason(page, reason); + __folio_set_owner_migrate_reason(folio, reason); } static inline void dump_page_owner(const struct page *page) { @@ -68,7 +68,7 @@ static inline void split_page_owner(struct page *page, int old_order, static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio) { } -static inline void set_page_owner_migrate_reason(struct page *page, int reason) +static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason) { } static inline void dump_page_owner(const struct page *page) diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h index e73a4292ef02..6a44be0f39f4 100644 --- a/include/linux/pageblock-flags.h +++ b/include/linux/pageblock-flags.h @@ -19,15 +19,33 @@ enum pageblock_bits { PB_migrate, PB_migrate_end = PB_migrate + PB_migratetype_bits - 1, /* 3 bits required for migrate types */ - PB_migrate_skip,/* If set the block is skipped by compaction */ + PB_compact_skip,/* If set the block is skipped by compaction */ +#ifdef CONFIG_MEMORY_ISOLATION + /* + * Pageblock isolation is represented with a separate bit, so that + * the migratetype of a block is not overwritten by isolation. + */ + PB_migrate_isolate, /* If set the block is isolated */ +#endif /* * Assume the bits will always align on a word. If this assumption * changes then get/set pageblock needs updating. */ - NR_PAGEBLOCK_BITS + __NR_PAGEBLOCK_BITS }; +#define NR_PAGEBLOCK_BITS (roundup_pow_of_two(__NR_PAGEBLOCK_BITS)) + +#define MIGRATETYPE_MASK ((1UL << (PB_migrate_end + 1)) - 1) + +#ifdef CONFIG_MEMORY_ISOLATION +#define MIGRATETYPE_AND_ISO_MASK \ + (((1UL << (PB_migrate_end + 1)) - 1) | BIT(PB_migrate_isolate)) +#else +#define MIGRATETYPE_AND_ISO_MASK MIGRATETYPE_MASK +#endif + #if defined(CONFIG_HUGETLB_PAGE) #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE @@ -41,18 +59,18 @@ extern unsigned int pageblock_order; * Huge pages are a constant size, but don't exceed the maximum allocation * granularity. 
*/ -#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_ORDER) +#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_MAX_ORDER) #endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ #elif defined(CONFIG_TRANSPARENT_HUGEPAGE) -#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_ORDER) +#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_MAX_ORDER) #else /* CONFIG_TRANSPARENT_HUGEPAGE */ -/* If huge pages are not used, group by PAGE_BLOCK_ORDER */ -#define pageblock_order PAGE_BLOCK_ORDER +/* If huge pages are not used, group by PAGE_BLOCK_MAX_ORDER */ +#define pageblock_order PAGE_BLOCK_MAX_ORDER #endif /* CONFIG_HUGETLB_PAGE */ @@ -65,27 +83,23 @@ extern unsigned int pageblock_order; /* Forward declaration */ struct page; -unsigned long get_pfnblock_flags_mask(const struct page *page, - unsigned long pfn, - unsigned long mask); - -void set_pfnblock_flags_mask(struct page *page, - unsigned long flags, - unsigned long pfn, - unsigned long mask); +enum migratetype get_pfnblock_migratetype(const struct page *page, + unsigned long pfn); +bool get_pfnblock_bit(const struct page *page, unsigned long pfn, + enum pageblock_bits pb_bit); +void set_pfnblock_bit(const struct page *page, unsigned long pfn, + enum pageblock_bits pb_bit); +void clear_pfnblock_bit(const struct page *page, unsigned long pfn, + enum pageblock_bits pb_bit); /* Declarations for getting and setting flags. See mm/page_alloc.c */ #ifdef CONFIG_COMPACTION #define get_pageblock_skip(page) \ - get_pfnblock_flags_mask(page, page_to_pfn(page), \ - (1 << (PB_migrate_skip))) + get_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip) #define clear_pageblock_skip(page) \ - set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \ - (1 << PB_migrate_skip)) + clear_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip) #define set_pageblock_skip(page) \ - set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \ - page_to_pfn(page), \ - (1 << PB_migrate_skip)) + set_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip) #else static inline bool get_pageblock_skip(struct page *page) { diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index ce2bcdcadb73..12a12dae727d 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -502,7 +502,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping, static inline bool mapping_large_folio_support(struct address_space *mapping) { /* AS_FOLIO_ORDER is only reasonable for pagecache folios */ - VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON, + VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON, "Anonymous mapping always supports large folio"); return mapping_max_folio_order(mapping) > 0; @@ -905,7 +905,8 @@ static inline struct page *find_or_create_page(struct address_space *mapping, * @mapping: target address_space * @index: the page index * - * Same as grab_cache_page(), but do not wait if the page is unavailable. + * Returns the locked page at the given index in the given cache, creating it + * if needed, but does not wait on a locked page or block on memory reclaim. * This is intended for speculative data generators, where the data can * be regenerated if the page couldn't be grabbed. This routine should * be safe to call while holding the lock for another page.
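The next hunk deletes the grab_cache_page() wrapper outright, so any remaining caller is expected to open-code the gfp mask the wrapper supplied implicitly. A hypothetical caller-side sketch of that rewrite; the function name below is invented for illustration.

/* Before: page = grab_cache_page(mapping, index); */
static struct page *example_grab(struct address_space *mapping, pgoff_t index)
{
	/* Open-code the mapping_gfp_mask() argument the old helper passed. */
	return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
}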
@@ -969,15 +970,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping, unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch); -/* - * Returns locked page at given index in given cache, creating it if needed. - */ -static inline struct page *grab_cache_page(struct address_space *mapping, - pgoff_t index) -{ - return find_or_create_page(mapping, index, mapping_gfp_mask(mapping)); -} - struct folio *read_cache_folio(struct address_space *, pgoff_t index, filler_t *filler, struct file *file); struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index, diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h index 9700a29f8afb..682472c15495 100644 --- a/include/linux/pagewalk.h +++ b/include/linux/pagewalk.h @@ -14,6 +14,8 @@ enum page_walk_lock { PGWALK_WRLOCK = 1, /* vma is expected to be already write-locked during the walk */ PGWALK_WRLOCK_VERIFY = 2, + /* vma is expected to be already read-locked during the walk */ + PGWALK_VMA_RDLOCK_VERIFY = 3, }; /** @@ -129,10 +131,9 @@ struct mm_walk { int walk_page_range(struct mm_struct *mm, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private); -int walk_page_range_novma(struct mm_struct *mm, unsigned long start, - unsigned long end, const struct mm_walk_ops *ops, - pgd_t *pgd, - void *private); +int walk_kernel_page_table_range(unsigned long start, + unsigned long end, const struct mm_walk_ops *ops, + pgd_t *pgd, void *private); int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start, unsigned long end, const struct mm_walk_ops *ops, void *private); diff --git a/include/linux/pci-ep-msi.h b/include/linux/pci-ep-msi.h new file mode 100644 index 000000000000..7c5db90f9620 --- /dev/null +++ b/include/linux/pci-ep-msi.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * PCI Endpoint *Function* side MSI header file + * + * Copyright (C) 2024 NXP + * Author: Frank Li <Frank.Li@nxp.com> + */ + +#ifndef __PCI_EP_MSI__ +#define __PCI_EP_MSI__ + +struct pci_epf; + +#ifdef CONFIG_PCI_ENDPOINT_MSI_DOORBELL +int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 nums); +void pci_epf_free_doorbell(struct pci_epf *epf); +#else +static inline int pci_epf_alloc_doorbell(struct pci_epf *epf, u16 nums) +{ + return -ENODATA; +} + +static inline void pci_epf_free_doorbell(struct pci_epf *epf) +{ +} +#endif /* CONFIG_PCI_ENDPOINT_MSI_DOORBELL */ + +#endif /* __PCI_EP_MSI__ */ diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 749cee0bcf2c..2e85504ba2ba 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h @@ -12,6 +12,7 @@ #include <linux/configfs.h> #include <linux/device.h> #include <linux/mod_devicetable.h> +#include <linux/msi.h> #include <linux/pci.h> struct pci_epf; @@ -129,6 +130,16 @@ struct pci_epf_bar { }; /** + * struct pci_epf_doorbell_msg - represents doorbell message + * @msg: MSI message + * @virq: IRQ number of this doorbell MSI message + */ +struct pci_epf_doorbell_msg { + struct msi_msg msg; + int virq; +}; + +/** * struct pci_epf - represents the PCI EPF device * @dev: the PCI EPF device * @name: the name of the PCI EPF device @@ -155,6 +166,8 @@ struct pci_epf_bar { * @vfunction_num_map: bitmap to manage virtual function number * @pci_vepf: list of virtual endpoint functions associated with this function * @event_ops: callbacks for capturing the EPC events + * @db_msg: data for MSI from RC side + * @num_db: number of doorbells */ struct pci_epf {
struct device dev; @@ -185,6 +198,8 @@ struct pci_epf { unsigned long vfunction_num_map; struct list_head pci_vepf; const struct pci_epc_event_ops *event_ops; + struct pci_epf_doorbell_msg *db_msg; + u16 num_db; }; /** @@ -226,6 +241,9 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar, enum pci_epc_interface_type type); void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar, enum pci_epc_interface_type type); + +int pci_epf_align_inbound_addr(struct pci_epf *epf, enum pci_barno bar, + u64 addr, dma_addr_t *base, size_t *off); int pci_epf_bind(struct pci_epf *epf); void pci_epf_unbind(struct pci_epf *epf); int pci_epf_add_vepf(struct pci_epf *epf_pf, struct pci_epf *epf_vf); diff --git a/include/linux/pci-pwrctrl.h b/include/linux/pci-pwrctrl.h index 7d439b0675e9..4aefc7901cd1 100644 --- a/include/linux/pci-pwrctrl.h +++ b/include/linux/pci-pwrctrl.h @@ -39,7 +39,7 @@ struct device_link; struct pci_pwrctrl { struct device *dev; - /* Private: don't use. */ + /* private: internal use only */ struct notifier_block nb; struct device_link *link; struct work_struct work; diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h index c3e806c13d64..9e4e331b1603 100644 --- a/include/linux/pci-tph.h +++ b/include/linux/pci-tph.h @@ -28,6 +28,7 @@ int pcie_tph_get_cpu_st(struct pci_dev *dev, unsigned int cpu_uid, u16 *tag); void pcie_disable_tph(struct pci_dev *pdev); int pcie_enable_tph(struct pci_dev *pdev, int mode); +u16 pcie_tph_get_st_table_size(struct pci_dev *pdev); #else static inline int pcie_tph_set_st_entry(struct pci_dev *pdev, unsigned int index, u16 tag) diff --git a/include/linux/pci.h b/include/linux/pci.h index 05e68f35f392..59876de13860 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -328,6 +328,11 @@ struct rcec_ea; * determined (e.g., for Root Complex Integrated * Endpoints without the relevant Capability * Registers). + * @is_hotplug_bridge: Hotplug bridge of any kind (e.g. PCIe Hot-Plug Capable, + * Conventional PCI Hot-Plug, ACPI slot). + * Such bridges are allocated additional MMIO and bus + * number resources to allow for hierarchy expansion. + * @is_pciehp: PCIe Hot-Plug Capable bridge. */ struct pci_dev { struct list_head bus_list; /* Node in per-bus list */ @@ -451,6 +456,7 @@ struct pci_dev { unsigned int is_physfn:1; unsigned int is_virtfn:1; unsigned int is_hotplug_bridge:1; + unsigned int is_pciehp:1; unsigned int shpc_managed:1; /* SHPC owned by shpchp */ unsigned int is_thunderbolt:1; /* Thunderbolt controller */ /* @@ -744,6 +750,21 @@ static inline bool pci_is_vga(struct pci_dev *pdev) return false; } +/** + * pci_is_display - check if the PCI device is a display controller + * @pdev: PCI device + * + * Determine whether the given PCI device corresponds to a display + * controller. Display controllers are typically used for graphical output + * and are identified based on their class code. + * + * Return: true if the PCI device is a display controller, false otherwise. 
+ */ +static inline bool pci_is_display(struct pci_dev *pdev) +{ + return (pdev->class >> 16) == PCI_BASE_CLASS_DISPLAY; +} + #define for_each_pci_bridge(dev, bus) \ list_for_each_entry(dev, &bus->devices, bus_list) \ if (!pci_is_bridge(dev)) {} else @@ -2438,6 +2459,8 @@ int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); int pci_sriov_get_totalvfs(struct pci_dev *dev); int pci_sriov_configure_simple(struct pci_dev *dev, int nr_virtfn); resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); +int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size); +u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs); void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); /* Arch may override these (weak) */ @@ -2490,6 +2513,10 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) #define pci_sriov_configure_simple NULL static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) { return 0; } +static inline int pci_iov_vf_bar_set_size(struct pci_dev *dev, int resno, int size) +{ return -ENODEV; } +static inline u32 pci_iov_vf_bar_get_sizes(struct pci_dev *dev, int resno, int num_vfs) +{ return 0; } static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } #endif diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index ec77ccf1fc4d..ddf79641917f 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h @@ -104,6 +104,7 @@ static inline bool shpchp_is_native(struct pci_dev *bridge) { return true; } static inline bool hotplug_is_native(struct pci_dev *bridge) { - return pciehp_is_native(bridge) || shpchp_is_native(bridge); + return (bridge->is_pciehp && pciehp_is_native(bridge)) || + shpchp_is_native(bridge); } #endif diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index c16cdeaa505e..12d90360f6db 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h @@ -63,14 +63,15 @@ * 1. The symbol must be globally unique, even the static ones. * 2. Static percpu variables cannot be defined inside a function. * - * Archs which need weak percpu definitions should define - * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary. + * Archs which need weak percpu definitions should set + * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary. * * To ensure that the generic code observes the above two * restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak * definition is used for all cases. */ -#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU) +#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \ + defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU) /* * __pcpu_scope_* dummy variable is used to enforce scope. It * receives the static modifier when it's used in front of diff --git a/include/linux/pfn.h b/include/linux/pfn.h index 14bc053c53d8..b90ca0b6c331 100644 --- a/include/linux/pfn.h +++ b/include/linux/pfn.h @@ -4,15 +4,6 @@ #ifndef __ASSEMBLY__ #include <linux/types.h> - -/* - * pfn_t: encapsulates a page-frame number that is optionally backed - * by memmap (struct page). Whether a pfn_t has a 'struct page' - * backing is indicated by flags in the high bits of the value. 
- */ -typedef struct { - u64 val; -} pfn_t; #endif #define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK) diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h deleted file mode 100644 index 2d9148221e9a..000000000000 --- a/include/linux/pfn_t.h +++ /dev/null @@ -1,131 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _LINUX_PFN_T_H_ -#define _LINUX_PFN_T_H_ -#include <linux/mm.h> - -/* - * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags - * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry - * PFN_SG_LAST - pfn references a page and is the last scatterlist entry - * PFN_DEV - pfn is not covered by system memmap by default - * PFN_MAP - pfn has a dynamic page mapping established by a device driver - * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not - * get_user_pages - */ -#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT)) -#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1)) -#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) -#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) -#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) -#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5)) - -#define PFN_FLAGS_TRACE \ - { PFN_SPECIAL, "SPECIAL" }, \ - { PFN_SG_CHAIN, "SG_CHAIN" }, \ - { PFN_SG_LAST, "SG_LAST" }, \ - { PFN_DEV, "DEV" }, \ - { PFN_MAP, "MAP" } - -static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags) -{ - pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), }; - - return pfn_t; -} - -/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */ -static inline pfn_t pfn_to_pfn_t(unsigned long pfn) -{ - return __pfn_to_pfn_t(pfn, 0); -} - -static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags) -{ - return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags); -} - -static inline bool pfn_t_has_page(pfn_t pfn) -{ - return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0; -} - -static inline unsigned long pfn_t_to_pfn(pfn_t pfn) -{ - return pfn.val & ~PFN_FLAGS_MASK; -} - -static inline struct page *pfn_t_to_page(pfn_t pfn) -{ - if (pfn_t_has_page(pfn)) - return pfn_to_page(pfn_t_to_pfn(pfn)); - return NULL; -} - -static inline phys_addr_t pfn_t_to_phys(pfn_t pfn) -{ - return PFN_PHYS(pfn_t_to_pfn(pfn)); -} - -static inline pfn_t page_to_pfn_t(struct page *page) -{ - return pfn_to_pfn_t(page_to_pfn(page)); -} - -static inline int pfn_t_valid(pfn_t pfn) -{ - return pfn_valid(pfn_t_to_pfn(pfn)); -} - -#ifdef CONFIG_MMU -static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot) -{ - return pfn_pte(pfn_t_to_pfn(pfn), pgprot); -} -#endif - -#ifdef CONFIG_TRANSPARENT_HUGEPAGE -static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot) -{ - return pfn_pmd(pfn_t_to_pfn(pfn), pgprot); -} - -#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD -static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot) -{ - return pfn_pud(pfn_t_to_pfn(pfn), pgprot); -} -#endif -#endif - -#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP -static inline bool pfn_t_devmap(pfn_t pfn) -{ - const u64 flags = PFN_DEV|PFN_MAP; - - return (pfn.val & flags) == flags; -} -#else -static inline bool pfn_t_devmap(pfn_t pfn) -{ - return false; -} -pte_t pte_mkdevmap(pte_t pte); -pmd_t pmd_mkdevmap(pmd_t pmd); -#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \ - defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) -pud_t pud_mkdevmap(pud_t pud); -#endif -#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */ - -#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL -static inline bool pfn_t_special(pfn_t pfn) -{ - return (pfn.val & PFN_SPECIAL) == 
PFN_SPECIAL; -} -#else -static inline bool pfn_t_special(pfn_t pfn) -{ - return false; -} -#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */ -#endif /* _LINUX_PFN_T_H_ */ diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index 0b6e1f781d86..4c035637eeb7 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -456,6 +456,17 @@ static inline bool arch_has_hw_pte_young(void) } #endif +#ifndef exec_folio_order +/* + * Returns preferred minimum folio order for executable file-backed memory. Must + * be in range [0, PMD_ORDER). Default to order-0. + */ +static inline unsigned int exec_folio_order(void) +{ + return 0; +} +#endif + #ifndef arch_check_zapped_pte static inline void arch_check_zapped_pte(struct vm_area_struct *vma, pte_t pte) @@ -725,6 +736,29 @@ static inline pte_t get_and_clear_full_ptes(struct mm_struct *mm, } #endif +/** + * get_and_clear_ptes - Clear present PTEs that map consecutive pages of + * the same folio, collecting dirty/accessed bits. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * + * Use this instead of get_and_clear_full_ptes() if it is known that we don't + * need to clear the full mm, which is mostly the case. + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline pte_t get_and_clear_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + return get_and_clear_full_ptes(mm, addr, ptep, nr, 0); +} + #ifndef clear_full_ptes /** * clear_full_ptes - Clear present PTEs that map consecutive pages of the same @@ -757,6 +791,28 @@ static inline void clear_full_ptes(struct mm_struct *mm, unsigned long addr, } #endif +/** + * clear_ptes - Clear present PTEs that map consecutive pages of the same folio. + * @mm: Address space the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries to clear. + * + * Use this instead of clear_full_ptes() if it is known that we don't need to + * clear the full mm, which is mostly the case. + * + * Note that PTE bits in the PTE range besides the PFN can differ. For example, + * some PTEs might be write-protected. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. The PTEs are all in the same PMD. + */ +static inline void clear_ptes(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned int nr) +{ + clear_full_ptes(mm, addr, ptep, nr, 0); +} + /* * If two threads concurrently fault at the same page, the thread that * won the race updates the PTE and its local TLB/Cache. The other thread @@ -1320,7 +1376,9 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma, /* * Commit an update to a pte, leaving any hardware-controlled bits in - * the PTE unmodified. + * the PTE unmodified. The pte returned from ptep_modify_prot_start() may + * additionally have young and/or dirty bits set where previously they were not, + * so the updated pte may have these additional changes. 
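A usage sketch for the get_and_clear_ptes() wrapper introduced in the pgtable.h hunk above (hypothetical unmap path; the folio and nr variables and the dirty/young propagation are illustrative):

	pte_t pteval;

	/* page table lock held; all @nr PTEs map the same folio */
	pteval = get_and_clear_ptes(mm, addr, ptep, nr);
	if (pte_dirty(pteval))
		folio_mark_dirty(folio);
	if (pte_young(pteval))
		folio_mark_accessed(folio);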
*/ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, @@ -1329,6 +1387,86 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma, __ptep_modify_prot_commit(vma, addr, ptep, pte); } #endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */ + +/** + * modify_prot_start_ptes - Start a pte protection read-modify-write transaction + * over a batch of ptes, which protects against asynchronous hardware + * modifications to the ptes. The intention is not to prevent the hardware from + * making pte updates, but to prevent any updates it may make from being lost. + * Please see the comment above ptep_modify_prot_start() for full description. + * + * @vma: The virtual memory area the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @nr: Number of entries. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte + * in the batch. + * + * Note that PTE bits in the PTE batch besides the PFN can differ. + * + * Context: The caller holds the page table lock. The PTEs map consecutive + * pages that belong to the same folio. All other PTE bits must be identical for + * all PTEs in the batch except for young and dirty bits. The PTEs are all in + * the same PMD. + */ +#ifndef modify_prot_start_ptes +static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, unsigned int nr) +{ + pte_t pte, tmp_pte; + + pte = ptep_modify_prot_start(vma, addr, ptep); + while (--nr) { + ptep++; + addr += PAGE_SIZE; + tmp_pte = ptep_modify_prot_start(vma, addr, ptep); + if (pte_dirty(tmp_pte)) + pte = pte_mkdirty(pte); + if (pte_young(tmp_pte)) + pte = pte_mkyoung(pte); + } + return pte; +} +#endif + +/** + * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any + * hardware-controlled bits in the PTE unmodified. + * + * @vma: The virtual memory area the pages are mapped into. + * @addr: Address the first page is mapped at. + * @ptep: Page table pointer for the first entry. + * @old_pte: Old page table entry (for the first entry) which is now cleared. + * @pte: New page table entry to be set. + * @nr: Number of entries. + * + * May be overridden by the architecture; otherwise, implemented as a simple + * loop over ptep_modify_prot_commit(). + * + * Context: The caller holds the page table lock. The PTEs are all in the same + * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by + * ptep_modify_prot_start() may additionally have young and/or dirty bits set + * where previously they were not, so the updated ptes may have these + * additional changes. 
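The batched helpers are meant to be used as a start/commit pair, roughly as follows (hypothetical write-protect step; only the modify_prot_*_ptes() names come from this hunk):

	pte_t oldpte, newpte;

	/* page table lock held; the @nr PTEs map one folio */
	oldpte = modify_prot_start_ptes(vma, addr, ptep, nr);
	newpte = pte_wrprotect(oldpte);
	modify_prot_commit_ptes(vma, addr, ptep, oldpte, newpte, nr);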
+ */ +#ifndef modify_prot_commit_ptes +static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr) +{ + int i; + + for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) { + ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte); + + /* Advance PFN only, set same prot */ + old_pte = pte_next_pfn(old_pte); + pte = pte_next_pfn(pte); + } +} +#endif + #endif /* CONFIG_MMU */ /* @@ -1632,21 +1770,6 @@ static inline int pud_write(pud_t pud) } #endif /* pud_write */ -#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE) -static inline int pmd_devmap(pmd_t pmd) -{ - return 0; -} -static inline int pud_devmap(pud_t pud) -{ - return 0; -} -static inline int pgd_devmap(pgd_t pgd) -{ - return 0; -} -#endif - #if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \ !defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) static inline int pud_trans_huge(pud_t pud) @@ -1661,7 +1784,7 @@ static inline int pud_trans_unstable(pud_t *pud) defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD) pud_t pudval = READ_ONCE(*pud); - if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval)) + if (pud_none(pudval) || pud_trans_huge(pudval)) return 1; if (unlikely(pud_bad(pudval))) { pud_clear_bad(pud); @@ -1901,8 +2024,8 @@ typedef unsigned int pgtbl_mod_mask; * - It should contain a huge PFN, which points to a huge page larger than * PAGE_SIZE of the platform. The PFN format isn't important here. * - * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(), - * pXd_devmap(), or hugetlb mappings). + * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge() + * or hugetlb mappings). */ #ifndef pgd_leaf #define pgd_leaf(x) false @@ -2005,7 +2128,7 @@ typedef unsigned int pgtbl_mod_mask; * x: (yes) yes */ #define DECLARE_VM_GET_PAGE_PROT \ -pgprot_t vm_get_page_prot(unsigned long vm_flags) \ +pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \ { \ return protection_map[vm_flags & \ (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \ diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 9a8189ffd0f2..d138e1815645 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h @@ -165,25 +165,25 @@ struct pinctrl_desc { /* External interface to pin controller */ -extern int pinctrl_register_and_init(struct pinctrl_desc *pctldesc, +extern int pinctrl_register_and_init(const struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data, struct pinctrl_dev **pctldev); extern int pinctrl_enable(struct pinctrl_dev *pctldev); /* Please use pinctrl_register_and_init() and pinctrl_enable() instead */ -extern struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc, +extern struct pinctrl_dev *pinctrl_register(const struct pinctrl_desc *pctldesc, struct device *dev, void *driver_data); extern void pinctrl_unregister(struct pinctrl_dev *pctldev); extern int devm_pinctrl_register_and_init(struct device *dev, - struct pinctrl_desc *pctldesc, + const struct pinctrl_desc *pctldesc, void *driver_data, struct pinctrl_dev **pctldev); /* Please use devm_pinctrl_register_and_init() instead */ extern struct pinctrl_dev *devm_pinctrl_register(struct device *dev, - struct pinctrl_desc *pctldesc, + const struct pinctrl_desc *pctldesc, void *driver_data); extern void devm_pinctrl_unregister(struct device *dev, diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h index 54d672dd6f7d..76043a97f975 100644 --- 
a/include/linux/platform_data/emc2305.h +++ b/include/linux/platform_data/emc2305.h @@ -9,14 +9,20 @@ * struct emc2305_platform_data - EMC2305 driver platform data * @max_state: maximum cooling state of the cooling device; * @pwm_num: number of active channels; + * @pwm_output_mask: PWM output mask + * @pwm_polarity_mask: PWM polarity mask * @pwm_separate: separate PWM settings for every channel; * @pwm_min: array of minimum PWM per channel; + * @pwm_freq: array of PWM frequency per channel */ struct emc2305_platform_data { u8 max_state; u8 pwm_num; + u8 pwm_output_mask; + u8 pwm_polarity_mask; bool pwm_separate; u8 pwm_min[EMC2305_PWM_MAX]; + u16 pwm_freq[EMC2305_PWM_MAX]; }; #endif diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 0cca01b5607b..f21f806bfb38 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h @@ -232,7 +232,6 @@ struct power_supply; /* Run-time specific power supply configuration */ struct power_supply_config { - struct device_node *of_node; struct fwnode_handle *fwnode; /* Driver private data */ @@ -808,19 +807,10 @@ static inline void power_supply_put(struct power_supply *psy) {} static inline struct power_supply *power_supply_get_by_name(const char *name) { return NULL; } #endif -#ifdef CONFIG_OF -extern struct power_supply *power_supply_get_by_phandle(struct device_node *np, - const char *property); -extern struct power_supply *devm_power_supply_get_by_phandle( +extern struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode, + const char *property); +extern struct power_supply *devm_power_supply_get_by_reference( struct device *dev, const char *property); -#else /* !CONFIG_OF */ -static inline struct power_supply * -power_supply_get_by_phandle(struct device_node *np, const char *property) -{ return NULL; } -static inline struct power_supply * -devm_power_supply_get_by_phandle(struct device *dev, const char *property) -{ return NULL; } -#endif /* CONFIG_OF */ extern const enum power_supply_property power_supply_battery_info_properties[]; extern const size_t power_supply_battery_info_properties_size; diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index de1d24f19f76..f139377f4b31 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h @@ -27,6 +27,7 @@ enum { PROC_ENTRY_proc_read_iter = 1U << 1, PROC_ENTRY_proc_compat_ioctl = 1U << 2, + PROC_ENTRY_proc_lseek = 1U << 3, PROC_ENTRY_FORCE_LOOKUP = 1U << 7, }; diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h index 72ff44cca864..2467b3be15c9 100644 --- a/include/linux/raid/pq.h +++ b/include/linux/raid/pq.h @@ -11,8 +11,13 @@ #ifdef __KERNEL__ #include <linux/blkdev.h> +#include <linux/mm.h> -extern const char raid6_empty_zero_page[PAGE_SIZE]; +/* This should be const but the raid6 code is too convoluted for that. */ +static inline void *raid6_get_zero_page(void) +{ + return page_address(ZERO_PAGE(0)); +} #else /* ! __KERNEL__ */ /* Used for testing in user space */ @@ -191,6 +196,11 @@ static inline uint32_t raid6_jiffies(void) return tv.tv_sec*1000 + tv.tv_usec/1000; } +static inline void *raid6_get_zero_page(void) +{ + return raid6_empty_zero_page; +} + #endif /* ! 
__KERNEL__ */ #endif /* LINUX_RAID_RAID6_H */ diff --git a/include/linux/relay.h b/include/linux/relay.h index b3224111d074..6772a7075840 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h @@ -29,6 +29,22 @@ #define RELAYFS_CHANNEL_VERSION 7 /* + * Relay buffer statistics + */ +enum { + RELAY_STATS_BUF_FULL = (1 << 0), + RELAY_STATS_WRT_BIG = (1 << 1), + + RELAY_STATS_LAST = RELAY_STATS_WRT_BIG, +}; + +struct rchan_buf_stats +{ + unsigned int full_count; /* counter for buffer full */ + unsigned int big_count; /* counter for too big to write */ +}; + +/* * Per-cpu relay channel buffer */ struct rchan_buf @@ -43,11 +59,11 @@ struct rchan_buf struct irq_work wakeup_work; /* reader wakeup */ struct dentry *dentry; /* channel file dentry */ struct kref kref; /* channel buffer refcount */ + struct rchan_buf_stats stats; /* buffer stats */ struct page **page_array; /* array of current buffer pages */ unsigned int page_count; /* number of current buffer pages */ unsigned int finalized; /* buffer has been finalized */ size_t *padding; /* padding counts per sub-buffer */ - size_t prev_padding; /* temporary variable */ size_t bytes_consumed; /* bytes consumed in cur read subbuf */ size_t early_bytes; /* bytes consumed before VFS inited */ unsigned int cpu; /* this buf's cpu */ @@ -65,7 +81,6 @@ struct rchan const struct rchan_callbacks *cb; /* client callbacks */ struct kref kref; /* channel refcount */ void *private_data; /* for user-defined data */ - size_t last_toobig; /* tried to log event > subbuf size */ struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */ int is_global; /* One global buffer ? */ struct list_head list; /* for channel list */ @@ -84,7 +99,6 @@ struct rchan_callbacks * @buf: the channel buffer containing the new sub-buffer * @subbuf: the start of the new sub-buffer * @prev_subbuf: the start of the previous sub-buffer - * @prev_padding: unused space at the end of previous sub-buffer * * The client should return 1 to continue logging, 0 to stop * logging. 
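With prev_padding gone, a relay client's callback shrinks to the three-argument form shown in the next hunk; a minimal implementation might look like this (hypothetical client; relay_buf_full() is a long-standing relay.h helper):

	static int my_subbuf_start(struct rchan_buf *buf, void *subbuf,
				   void *prev_subbuf)
	{
		if (relay_buf_full(buf))
			return 0;	/* stop logging while the buffer is full */
		return 1;
	}

The new relay_stats() call can then report the accumulated counters, e.g. relay_stats(chan, RELAY_STATS_BUF_FULL) for the buffer-full count.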
@@ -100,8 +114,7 @@ struct rchan_callbacks */ int (*subbuf_start) (struct rchan_buf *buf, void *subbuf, - void *prev_subbuf, - size_t prev_padding); + void *prev_subbuf); /* * create_buf_file - create file to represent a relay channel buffer @@ -161,6 +174,7 @@ struct rchan *relay_open(const char *base_filename, void *private_data); extern void relay_close(struct rchan *chan); extern void relay_flush(struct rchan *chan); +size_t relay_stats(struct rchan *chan, int flags); extern void relay_subbufs_consumed(struct rchan *chan, unsigned int cpu, size_t consumed); diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index bc90c3c7b5fd..876358cfe1b1 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h @@ -144,6 +144,9 @@ int ring_buffer_write(struct trace_buffer *buffer, void ring_buffer_nest_start(struct trace_buffer *buffer); void ring_buffer_nest_end(struct trace_buffer *buffer); +DEFINE_GUARD(ring_buffer_nest, struct trace_buffer *, + ring_buffer_nest_start(_T), ring_buffer_nest_end(_T)) + struct ring_buffer_event * ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, unsigned long *lost_events); diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c4f4903b1088..6cd020eea37a 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -449,6 +449,28 @@ static inline void __folio_rmap_sanity_checks(const struct folio *folio, default: VM_WARN_ON_ONCE(true); } + + /* + * Anon folios must have an associated live anon_vma as long as they're + * mapped into userspace. + * Note that the atomic_read() mainly does two things: + * + * 1. In KASAN builds with CONFIG_SLUB_RCU_DEBUG, it causes KASAN to + * check that the associated anon_vma has not yet been freed (subject + * to KASAN's usual limitations). This check will pass if the + * anon_vma's refcount has already dropped to 0 but an RCU grace + * period hasn't passed since then. + * 2. If the anon_vma has not yet been freed, it checks that the + * anon_vma still has a nonzero refcount (as opposed to being in the + * middle of an RCU delay for getting freed). + */ + if (folio_test_anon(folio) && !folio_test_ksm(folio)) { + unsigned long mapping = (unsigned long)folio->mapping; + struct anon_vma *anon_vma; + + anon_vma = (void *)(mapping - FOLIO_MAPPING_ANON); + VM_WARN_ON_FOLIO(atomic_read(&anon_vma->refcount) == 0, folio); + } } /* @@ -893,7 +915,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio, * Called from mm/vmscan.c to handle paging out */ int folio_referenced(struct folio *, int is_locked, - struct mem_cgroup *memcg, unsigned long *vm_flags); + struct mem_cgroup *memcg, vm_flags_t *vm_flags); void try_to_migrate(struct folio *folio, enum ttu_flags flags); void try_to_unmap(struct folio *, enum ttu_flags flags); @@ -1025,7 +1047,7 @@ struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio, static inline int folio_referenced(struct folio *folio, int is_locked, struct mem_cgroup *memcg, - unsigned long *vm_flags) + vm_flags_t *vm_flags) { *vm_flags = 0; return 0; diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h index 5a41c3bbcbe3..01da4582db6d 100644 --- a/include/linux/rtc/ds1685.h +++ b/include/linux/rtc/ds1685.h @@ -8,7 +8,7 @@ * include larger, battery-backed NV-SRAM, burst-mode access, and an RTC * write counter. * - * Copyright (C) 2011-2014 Joshua Kinard <kumba@gentoo.org>. + * Copyright (C) 2011-2014 Joshua Kinard <linux@kumba.dev>. * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd-electronics.com>. 
* * References: diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h index c8b543d428b0..f1aaf676a874 100644 --- a/include/linux/rwsem.h +++ b/include/linux/rwsem.h @@ -132,6 +132,18 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem) return !list_empty(&sem->wait_list); } +#if defined(CONFIG_DEBUG_RWSEMS) || defined(CONFIG_DETECT_HUNG_TASK_BLOCKER) +/* + * Return just the real task structure pointer of the owner + */ +extern struct task_struct *rwsem_owner(struct rw_semaphore *sem); + +/* + * Return true if the rwsem is owned by a reader. + */ +extern bool is_rwsem_reader_owned(struct rw_semaphore *sem); +#endif + #else /* !CONFIG_PREEMPT_RT */ #include <linux/rwbase_rt.h> @@ -240,10 +252,11 @@ extern void up_write(struct rw_semaphore *sem); DEFINE_GUARD(rwsem_read, struct rw_semaphore *, down_read(_T), up_read(_T)) DEFINE_GUARD_COND(rwsem_read, _try, down_read_trylock(_T)) -DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T) == 0) +DEFINE_GUARD_COND(rwsem_read, _intr, down_read_interruptible(_T), _RET == 0) DEFINE_GUARD(rwsem_write, struct rw_semaphore *, down_write(_T), up_write(_T)) DEFINE_GUARD_COND(rwsem_write, _try, down_write_trylock(_T)) +DEFINE_GUARD_COND(rwsem_write, _kill, down_write_killable(_T), _RET == 0) /* * downgrade write lock to read lock diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 189140bf11fc..ffb9907c7070 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h @@ -210,23 +210,6 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth); int sbitmap_get(struct sbitmap *sb); /** - * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap, - * limiting the depth used from each word. - * @sb: Bitmap to allocate from. - * @shallow_depth: The maximum number of bits to allocate from a single word. - * - * This rather specific operation allows for having multiple users with - * different allocation limits. E.g., there can be a high-priority class that - * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow() - * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority - * class can only allocate half of the total bits in the bitmap, preventing it - * from starving out the high-priority class. - * - * Return: Non-negative allocated bit number if successful, -1 otherwise. - */ -int sbitmap_get_shallow(struct sbitmap *sb, unsigned long shallow_depth); - -/** * sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap. * @sb: Bitmap to check. * @@ -478,7 +461,7 @@ unsigned long __sbitmap_queue_get_batch(struct sbitmap_queue *sbq, int nr_tags, * sbitmap_queue, limiting the depth used from each word, with preemption * already disabled. * @sbq: Bitmap queue to allocate from. - * @shallow_depth: The maximum number of bits to allocate from a single word. + * @shallow_depth: The maximum number of bits to allocate from the queue. * See sbitmap_get_shallow(). 
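The reworked DEFINE_GUARD_COND() users above pair naturally with scoped_cond_guard() from <linux/cleanup.h>; a sketch with the new rwsem_write_kill class (hypothetical caller, do_update() is illustrative):

	int my_update(struct rw_semaphore *sem)
	{
		/* bail out with -EINTR if the task is killed while waiting */
		scoped_cond_guard(rwsem_write_kill, return -EINTR, sem) {
			do_update();
		}
		return 0;
	}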
* * If you call this, make sure to call sbitmap_queue_min_shallow_depth() after diff --git a/include/linux/sched.h b/include/linux/sched.h index a9693f179c58..2b272382673d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -47,6 +47,7 @@ #include <linux/rv.h> #include <linux/uidgid_types.h> #include <linux/tracepoint-defs.h> +#include <linux/unwind_deferred_types.h> #include <asm/kmap_size.h> /* task_struct member predeclarations (sorted alphabetically): */ @@ -1646,6 +1647,10 @@ struct task_struct { struct user_event_mm *user_event_mm; #endif +#ifdef CONFIG_UNWIND_USER + struct unwind_task_info unwind_info; +#endif + /* CPU-specific state of this task: */ struct thread_struct thread; diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h index f7545430a548..7047101dbf58 100644 --- a/include/linux/sched/ext.h +++ b/include/linux/sched/ext.h @@ -164,7 +164,7 @@ struct sched_ext_entity { /* * Runtime budget in nsecs. This is usually set through - * scx_bpf_dispatch() but can also be modified directly by the BPF + * scx_bpf_dsq_insert() but can also be modified directly by the BPF * scheduler. Automatically decreased by SCX as the task executes. On * depletion, a scheduling event is triggered. * @@ -176,10 +176,10 @@ struct sched_ext_entity { /* * Used to order tasks when dispatching to the vtime-ordered priority - * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime() - * but can also be modified directly by the BPF scheduler. Modifying it - * while a task is queued on a dsq may mangle the ordering and is not - * recommended. + * queue of a dsq. This is usually set through + * scx_bpf_dsq_insert_vtime() but can also be modified directly by the + * BPF scheduler. Modifying it while a task is queued on a dsq may + * mangle the ordering and is not recommended. 
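On the BPF side, both slice and dsq_vtime are normally set through the renamed insert helpers; an enqueue sketch (illustrative sched_ext BPF code: SHARED_DSQ stands for a DSQ the scheduler created itself, and BPF_STRUCT_OPS comes from the scx example headers):

	void BPF_STRUCT_OPS(my_enqueue, struct task_struct *p, u64 enq_flags)
	{
		/* 3rd argument sets slice, 4th orders the task by dsq_vtime */
		scx_bpf_dsq_insert_vtime(p, SHARED_DSQ, SCX_SLICE_DFL,
					 p->scx.dsq_vtime, enq_flags);
	}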
*/ u64 dsq_vtime; @@ -206,12 +206,25 @@ struct sched_ext_entity { void sched_ext_free(struct task_struct *p); void print_scx_info(const char *log_lvl, struct task_struct *p); void scx_softlockup(u32 dur_s); +bool scx_rcu_cpu_stall(void); #else /* !CONFIG_SCHED_CLASS_EXT */ static inline void sched_ext_free(struct task_struct *p) {} static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {} static inline void scx_softlockup(u32 dur_s) {} +static inline bool scx_rcu_cpu_stall(void) { return false; } #endif /* CONFIG_SCHED_CLASS_EXT */ + +struct scx_task_group { +#ifdef CONFIG_EXT_GROUP_SCHED + u32 flags; /* SCX_TG_* */ + u32 weight; + u64 bw_period_us; + u64 bw_quota_us; + u64 bw_burst_us; +#endif +}; + #endif /* _LINUX_SCHED_EXT_H */ diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 5f03a39a26f7..6d0f9c599ff7 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h @@ -11,6 +11,8 @@ #include <linux/fs_parser.h> #include <linux/userfaultfd_k.h> +struct swap_iocb; + /* inode in-kernel data */ #ifdef CONFIG_TMPFS_QUOTA @@ -107,7 +109,8 @@ static inline bool shmem_mapping(struct address_space *mapping) void shmem_unlock_mapping(struct address_space *mapping); struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, pgoff_t index, gfp_t gfp_mask); -int shmem_writeout(struct folio *folio, struct writeback_control *wbc); +int shmem_writeout(struct folio *folio, struct swap_iocb **plug, + struct list_head *folio_list); void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); int shmem_unuse(unsigned int type); diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index b8b06e71b73e..14b923ddb6df 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3033,6 +3033,29 @@ static inline void skb_reset_transport_header(struct sk_buff *skb) skb->transport_header = offset; } +/** + * skb_reset_transport_header_careful - conditionally reset transport header + * @skb: buffer to alter + * + * Hardened version of skb_reset_transport_header(). + * + * Returns: true if the operation was a success. 
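Because the hardened reset can fail, callers are expected to check the return value of the helper defined just below rather than ignore it; a sketch (hypothetical receive path; the goto target is illustrative):

	if (!skb_reset_transport_header_careful(skb))
		goto drop;	/* offset would not fit the header field */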
+ */ +static inline bool __must_check +skb_reset_transport_header_careful(struct sk_buff *skb) +{ + long offset = skb->data - skb->head; + + if (unlikely(offset != (typeof(skb->transport_header))offset)) + return false; + + if (unlikely(offset == (typeof(skb->transport_header))~0U)) + return false; + + skb->transport_header = offset; + return true; +} + static inline void skb_set_transport_header(struct sk_buff *skb, const int offset) { diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h index 1a2c0e0838f9..71e0c09a49eb 100644 --- a/include/linux/soc/samsung/exynos-regs-pmu.h +++ b/include/linux/soc/samsung/exynos-regs-pmu.h @@ -662,6 +662,14 @@ #define EXYNOS5433_PAD_RETENTION_UFS_OPTION (0x3268) #define EXYNOS5433_PAD_RETENTION_FSYSGENIO_OPTION (0x32A8) +/* For Exynos990 */ +#define EXYNOS990_PHY_CTRL_USB20 (0x72C) + +/* For Exynos7870 */ +#define EXYNOS7870_MIPI_PHY_CONTROL0 (0x070c) +#define EXYNOS7870_MIPI_PHY_CONTROL1 (0x0714) +#define EXYNOS7870_MIPI_PHY_CONTROL2 (0x0734) + /* For Tensor GS101 */ /* PMU ALIVE */ #define GS101_SYSIP_DAT0 (0x810) @@ -669,6 +677,7 @@ #define GS101_CPU_INFORM(cpu) \ (GS101_CPU0_INFORM + (cpu*4)) #define GS101_SYSTEM_CONFIGURATION (0x3A00) +#define GS101_EINT_WAKEUP_MASK (0x3A80) #define GS101_PHY_CTRL_USB20 (0x3EB0) #define GS101_PHY_CTRL_USBDP (0x3EB4) diff --git a/include/linux/soundwire/sdw_amd.h b/include/linux/soundwire/sdw_amd.h index 6b839987f14c..fe31773d5210 100644 --- a/include/linux/soundwire/sdw_amd.h +++ b/include/linux/soundwire/sdw_amd.h @@ -30,6 +30,7 @@ #define ACP63_PCI_REV_ID 0x63 #define ACP70_PCI_REV_ID 0x70 #define ACP71_PCI_REV_ID 0x71 +#define ACP72_PCI_REV_ID 0x72 struct acp_sdw_pdata { u16 instance; diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index c4830dfaff3d..82390712794c 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h @@ -424,7 +424,7 @@ bool spi_mem_default_supports_op(struct spi_mem *mem, int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op); void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op); -u64 spi_mem_calc_op_duration(struct spi_mem_op *op); +u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op); bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op); diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h index 876130091384..f06f7b785091 100644 --- a/include/linux/sprintf.h +++ b/include/linux/sprintf.h @@ -23,7 +23,7 @@ __scanf(2, 0) int vsscanf(const char *, const char *, va_list); /* These are for specific cases, do not use without real need */ extern bool no_hash_pointers; -int no_hash_pointers_enable(char *str); +void hash_pointers_finalize(bool slub_debug); /* Used for Rust formatting ('%pA') */ char *rust_fmt_argument(char *buf, char *end, const void *ptr); diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index e3358c630ba1..8a9ec617cf66 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h @@ -130,10 +130,7 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) __be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int len); __be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int len); __be32 *xdr_encode_string(__be32 *p, const char *s); -__be32 *xdr_decode_string_inplace(__be32 *p, char **sp, unsigned int *lenp, - unsigned int maxlen); __be32 *xdr_encode_netobj(__be32 *p, const struct xdr_netobj *); -__be32 *xdr_decode_netobj(__be32 *p, struct 
xdr_netobj *); void xdr_inline_pages(struct xdr_buf *, unsigned int, struct page **, unsigned int, unsigned int); @@ -342,12 +339,6 @@ xdr_stream_remaining(const struct xdr_stream *xdr) return xdr->nwords << 2; } -ssize_t xdr_stream_decode_opaque(struct xdr_stream *xdr, void *ptr, - size_t size); -ssize_t xdr_stream_decode_opaque_dup(struct xdr_stream *xdr, void **ptr, - size_t maxlen, gfp_t gfp_flags); -ssize_t xdr_stream_decode_string(struct xdr_stream *xdr, char *str, - size_t size); ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str, size_t maxlen, gfp_t gfp_flags); ssize_t xdr_stream_decode_opaque_auth(struct xdr_stream *xdr, u32 *flavor, diff --git a/include/linux/swap.h b/include/linux/swap.h index bc0e1c275fc0..2fe6ed2cc3fd 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h @@ -376,8 +376,9 @@ extern unsigned long totalreserve_pages; /* linux/mm/swap.c */ -void lru_note_cost(struct lruvec *lruvec, bool file, - unsigned int nr_io, unsigned int nr_rotated); +void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file, + unsigned int nr_io, unsigned int nr_rotated) + __releases(lruvec->lru_lock); void lru_note_cost_refault(struct folio *); void folio_add_lru(struct folio *); void folio_add_lru_vma(struct folio *, struct vm_area_struct *); @@ -415,7 +416,7 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, #define MIN_SWAPPINESS 0 #define MAX_SWAPPINESS 200 -/* Just recliam from anon folios in proactive memory reclaim */ +/* Just reclaim from anon folios in proactive memory reclaim */ #define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1) extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, @@ -431,6 +432,22 @@ extern unsigned long shrink_all_memory(unsigned long nr_pages); extern int vm_swappiness; long remove_mapping(struct address_space *mapping, struct folio *folio); +#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) +extern int reclaim_register_node(struct node *node); +extern void reclaim_unregister_node(struct node *node); + +#else + +static inline int reclaim_register_node(struct node *node) +{ + return 0; +} + +static inline void reclaim_unregister_node(struct node *node) +{ +} +#endif /* CONFIG_SYSFS && CONFIG_NUMA */ + #ifdef CONFIG_NUMA extern int sysctl_min_unmapped_ratio; extern int sysctl_min_slab_ratio; diff --git a/include/linux/sys_info.h b/include/linux/sys_info.h new file mode 100644 index 000000000000..89d77dc4f2ed --- /dev/null +++ b/include/linux/sys_info.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_SYS_INFO_H +#define _LINUX_SYS_INFO_H + +#include <linux/sysctl.h> + +/* + * SYS_INFO_PANIC_CONSOLE_REPLAY is for panic case only, as it needs special + * handling which only fits panic case. 
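As an illustration of the interface declared in this new header, a caller ORs together the SYS_INFO_* bits defined below and hands them to sys_info() (hypothetical call site; SYS_INFO_PANIC_CONSOLE_REPLAY is only honored on the panic path, per the comment above):

	/* dump task and memory state to the console */
	sys_info(SYS_INFO_TASKS | SYS_INFO_MEM);

sys_info_parse_param() builds the same mask from a comma-separated list of names, for use by boot parameters.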
+ */ +#define SYS_INFO_TASKS 0x00000001 +#define SYS_INFO_MEM 0x00000002 +#define SYS_INFO_TIMERS 0x00000004 +#define SYS_INFO_LOCKS 0x00000008 +#define SYS_INFO_FTRACE 0x00000010 +#define SYS_INFO_PANIC_CONSOLE_REPLAY 0x00000020 +#define SYS_INFO_ALL_CPU_BT 0x00000040 +#define SYS_INFO_BLOCKED_TASKS 0x00000080 + +void sys_info(unsigned long si_mask); +unsigned long sys_info_parse_param(char *str); + +#ifdef CONFIG_SYSCTL +int sysctl_sys_info_handler(const struct ctl_table *ro_table, int write, + void *buffer, size_t *lenp, + loff_t *ppos); +#endif +#endif /* _LINUX_SYS_INFO_H */ diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index fa9cf4292dff..04307a19cde3 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h @@ -480,7 +480,6 @@ enum { EVENT_FILE_FL_RECORDED_TGID_BIT, EVENT_FILE_FL_FILTERED_BIT, EVENT_FILE_FL_NO_SET_FILTER_BIT, - EVENT_FILE_FL_SOFT_MODE_BIT, EVENT_FILE_FL_SOFT_DISABLED_BIT, EVENT_FILE_FL_TRIGGER_MODE_BIT, EVENT_FILE_FL_TRIGGER_COND_BIT, @@ -618,7 +617,6 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...); * RECORDED_TGID - The tgids should be recorded at sched_switch * FILTERED - The event has a filter attached * NO_SET_FILTER - Set when filter has error and is to be ignored - * SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED * SOFT_DISABLED - When set, do not trace the event (even though its * tracepoint may be enabled) * TRIGGER_MODE - When set, invoke the triggers associated with the event @@ -633,7 +631,6 @@ enum { EVENT_FILE_FL_RECORDED_TGID = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT), EVENT_FILE_FL_FILTERED = (1 << EVENT_FILE_FL_FILTERED_BIT), EVENT_FILE_FL_NO_SET_FILTER = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT), - EVENT_FILE_FL_SOFT_MODE = (1 << EVENT_FILE_FL_SOFT_MODE_BIT), EVENT_FILE_FL_SOFT_DISABLED = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT), EVENT_FILE_FL_TRIGGER_MODE = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT), EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT), diff --git a/include/linux/unwind_deferred.h b/include/linux/unwind_deferred.h new file mode 100644 index 000000000000..26122d00708a --- /dev/null +++ b/include/linux/unwind_deferred.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_UNWIND_USER_DEFERRED_H +#define _LINUX_UNWIND_USER_DEFERRED_H + +#include <linux/task_work.h> +#include <linux/unwind_user.h> +#include <linux/unwind_deferred_types.h> + +struct unwind_work; + +typedef void (*unwind_callback_t)(struct unwind_work *work, struct unwind_stacktrace *trace, u64 cookie); + +struct unwind_work { + struct list_head list; + unwind_callback_t func; + int bit; +}; + +#ifdef CONFIG_UNWIND_USER + +enum { + UNWIND_PENDING_BIT = 0, + UNWIND_USED_BIT, +}; + +enum { + UNWIND_PENDING = BIT(UNWIND_PENDING_BIT), + + /* Set if the unwinding was used (directly or deferred) */ + UNWIND_USED = BIT(UNWIND_USED_BIT) +}; + +void unwind_task_init(struct task_struct *task); +void unwind_task_free(struct task_struct *task); + +int unwind_user_faultable(struct unwind_stacktrace *trace); + +int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func); +int unwind_deferred_request(struct unwind_work *work, u64 *cookie); +void unwind_deferred_cancel(struct unwind_work *work); + +void unwind_deferred_task_exit(struct task_struct *task); + +static __always_inline void unwind_reset_info(void) +{ + struct unwind_task_info *info = &current->unwind_info; + unsigned long bits; + + /* Was there any unwinding?
*/ + if (unlikely(info->unwind_mask)) { + bits = info->unwind_mask; + do { + /* Is a task_work going to run again before going back */ + if (bits & UNWIND_PENDING) + return; + } while (!try_cmpxchg(&info->unwind_mask, &bits, 0UL)); + current->unwind_info.id.id = 0; + + if (unlikely(info->cache)) { + info->cache->nr_entries = 0; + info->cache->unwind_completed = 0; + } + } +} + +#else /* !CONFIG_UNWIND_USER */ + +static inline void unwind_task_init(struct task_struct *task) {} +static inline void unwind_task_free(struct task_struct *task) {} + +static inline int unwind_user_faultable(struct unwind_stacktrace *trace) { return -ENOSYS; } +static inline int unwind_deferred_init(struct unwind_work *work, unwind_callback_t func) { return -ENOSYS; } +static inline int unwind_deferred_request(struct unwind_work *work, u64 *timestamp) { return -ENOSYS; } +static inline void unwind_deferred_cancel(struct unwind_work *work) {} + +static inline void unwind_deferred_task_exit(struct task_struct *task) {} +static inline void unwind_reset_info(void) {} + +#endif /* !CONFIG_UNWIND_USER */ + +#endif /* _LINUX_UNWIND_USER_DEFERRED_H */ diff --git a/include/linux/unwind_deferred_types.h b/include/linux/unwind_deferred_types.h new file mode 100644 index 000000000000..33b62ac25c86 --- /dev/null +++ b/include/linux/unwind_deferred_types.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_UNWIND_USER_DEFERRED_TYPES_H +#define _LINUX_UNWIND_USER_DEFERRED_TYPES_H + +struct unwind_cache { + unsigned long unwind_completed; + unsigned int nr_entries; + unsigned long entries[]; +}; + +/* + * The unwind_task_id is a unique identifier that maps to a user space + * stacktrace. It is generated the first time a deferred user space + * stacktrace is requested after a task has entered the kernel and + * is cleared to zero when it exits. The mapped id will be a non-zero + * number. + * + * To simplify the generation of the 64 bit number, 32 bits will be + * the CPU it was generated on, and the other 32 bits will be a per + * cpu counter that gets incremented by two every time a new identifier + * is generated. The LSB will always be set to keep the value + * from being zero. + */ +union unwind_task_id { + struct { + u32 cpu; + u32 cnt; + }; + u64 id; +}; + +struct unwind_task_info { + unsigned long unwind_mask; + struct unwind_cache *cache; + struct callback_head work; + union unwind_task_id id; +}; + +#endif /* _LINUX_UNWIND_USER_DEFERRED_TYPES_H */ diff --git a/include/linux/unwind_user.h b/include/linux/unwind_user.h new file mode 100644 index 000000000000..7f7282516bf5 --- /dev/null +++ b/include/linux/unwind_user.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_UNWIND_USER_H +#define _LINUX_UNWIND_USER_H + +#include <linux/unwind_user_types.h> +#include <asm/unwind_user.h> + +#ifndef ARCH_INIT_USER_FP_FRAME + #define ARCH_INIT_USER_FP_FRAME +#endif + +int unwind_user(struct unwind_stacktrace *trace, unsigned int max_entries); + +#endif /* _LINUX_UNWIND_USER_H */ diff --git a/include/linux/unwind_user_types.h b/include/linux/unwind_user_types.h new file mode 100644 index 000000000000..a449f15be890 --- /dev/null +++ b/include/linux/unwind_user_types.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _LINUX_UNWIND_USER_TYPES_H +#define _LINUX_UNWIND_USER_TYPES_H + +#include <linux/types.h> + +/* + * Unwind types, listed in priority order: lower numbers are attempted first if + * available.
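A consumer-side sketch of the deferred unwind API declared in unwind_deferred.h above (hypothetical tracer; my_work, my_unwind_cb and my_event are illustrative):

	static struct unwind_work my_work;

	static void my_unwind_cb(struct unwind_work *work,
				 struct unwind_stacktrace *trace, u64 cookie)
	{
		/* task_work context, right before returning to user space;
		 * trace->entries[0..trace->nr - 1] holds the user stack and
		 * cookie matches the one handed out at request time.
		 */
	}

	static int my_tracer_init(void)
	{
		return unwind_deferred_init(&my_work, my_unwind_cb);
	}

	static void my_event(void)	/* e.g. from interrupt context */
	{
		u64 cookie;

		if (unwind_deferred_request(&my_work, &cookie) < 0)
			return;	/* request failed, no callback will run */
	}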
+ */ +enum unwind_user_type_bits { + UNWIND_USER_TYPE_FP_BIT = 0, + + NR_UNWIND_USER_TYPE_BITS, +}; + +enum unwind_user_type { + /* Type "none" for the start of stack walk iteration. */ + UNWIND_USER_TYPE_NONE = 0, + UNWIND_USER_TYPE_FP = BIT(UNWIND_USER_TYPE_FP_BIT), +}; + +struct unwind_stacktrace { + unsigned int nr; + unsigned long *entries; +}; + +struct unwind_user_frame { + s32 cfa_off; + s32 ra_off; + s32 fp_off; + bool use_fp; +}; + +struct unwind_user_state { + unsigned long ip; + unsigned long sp; + unsigned long fp; + enum unwind_user_type current_type; + unsigned int available_types; + bool done; +}; + +#endif /* _LINUX_UNWIND_USER_TYPES_H */ diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h index bce95153e5a6..ee19e9f915b8 100644 --- a/include/linux/usb/uvc.h +++ b/include/linux/usb/uvc.h @@ -29,6 +29,9 @@ #define UVC_GUID_EXT_GPIO_CONTROLLER \ {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03} +#define UVC_GUID_MSXU_1_5 \ + {0xdc, 0x95, 0x3f, 0x0f, 0x32, 0x26, 0x4e, 0x4c, \ + 0x92, 0xc9, 0xa0, 0x47, 0x82, 0xf4, 0x3b, 0xc8} #define UVC_GUID_FORMAT_MJPEG \ { 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \ diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h index 75342022d144..c0e716aec26a 100644 --- a/include/linux/userfaultfd_k.h +++ b/include/linux/userfaultfd_k.h @@ -30,11 +30,7 @@ * from userfaultfd, in order to leave a free define-space for * shared O_* flags. */ -#define UFFD_CLOEXEC O_CLOEXEC -#define UFFD_NONBLOCK O_NONBLOCK - #define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) -#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS) /* * Start with fault_pending_wqh and fault_wqh so they're more likely @@ -213,12 +209,12 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma) } static inline bool vma_can_userfault(struct vm_area_struct *vma, - unsigned long vm_flags, + vm_flags_t vm_flags, bool wp_async) { vm_flags &= __VM_UFFD_FLAGS; - if (vm_flags & VM_DROPPABLE) + if (vma->vm_flags & VM_DROPPABLE) return false; if ((vm_flags & VM_UFFD_MINOR) && @@ -263,6 +259,7 @@ extern void mremap_userfaultfd_prep(struct vm_area_struct *, extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *, unsigned long from, unsigned long to, unsigned long len); +void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *); extern bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, @@ -285,7 +282,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi, int userfaultfd_register_range(struct userfaultfd_ctx *ctx, struct vm_area_struct *vma, - unsigned long vm_flags, + vm_flags_t vm_flags, unsigned long start, unsigned long end, bool wp_async); @@ -375,6 +372,10 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx, { } +static inline void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *ctx) +{ +} + static inline bool userfaultfd_remove(struct vm_area_struct *vma, unsigned long start, unsigned long end) diff --git a/include/linux/vfio.h b/include/linux/vfio.h index 707b00772ce1..eb563f538dee 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h @@ -105,6 +105,9 @@ struct vfio_device { * @match: Optional device name match callback (return: 0 for no-match, >0 for * match, -errno for abort (ex. match with insufficient or incorrect * additional args) + * @match_token_uuid: Optional device token match/validation. Return 0 + * if the uuid is valid for the device, -errno otherwise. uuid is NULL + * if none was provided. 
* @dma_unmap: Called when userspace unmaps IOVA from the container * this device is attached to. * @device_feature: Optional, fill in the VFIO_DEVICE_FEATURE ioctl @@ -132,6 +135,7 @@ struct vfio_device_ops { int (*mmap)(struct vfio_device *vdev, struct vm_area_struct *vma); void (*request)(struct vfio_device *vdev, unsigned int count); int (*match)(struct vfio_device *vdev, char *buf); + int (*match_token_uuid)(struct vfio_device *vdev, const uuid_t *uuid); void (*dma_unmap)(struct vfio_device *vdev, u64 iova, u64 length); int (*device_feature)(struct vfio_device *device, u32 flags, void __user *arg, size_t argsz); diff --git a/include/linux/vfio_pci_core.h b/include/linux/vfio_pci_core.h index fbb472dd99b3..f541044e42a2 100644 --- a/include/linux/vfio_pci_core.h +++ b/include/linux/vfio_pci_core.h @@ -122,6 +122,8 @@ ssize_t vfio_pci_core_write(struct vfio_device *core_vdev, const char __user *bu int vfio_pci_core_mmap(struct vfio_device *core_vdev, struct vm_area_struct *vma); void vfio_pci_core_request(struct vfio_device *core_vdev, unsigned int count); int vfio_pci_core_match(struct vfio_device *core_vdev, char *buf); +int vfio_pci_core_match_token_uuid(struct vfio_device *core_vdev, + const uuid_t *uuid); int vfio_pci_core_enable(struct vfio_pci_core_device *vdev); void vfio_pci_core_disable(struct vfio_pci_core_device *vdev); void vfio_pci_core_finish_enable(struct vfio_pci_core_device *vdev); diff --git a/include/linux/virtio.h b/include/linux/virtio.h index 04b90c88d164..db31fc6f4f1f 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -199,7 +199,7 @@ int virtio_device_reset_done(struct virtio_device *dev); size_t virtio_max_dma_size(const struct virtio_device *vdev); #define virtio_device_for_each_vq(vdev, vq) \ - list_for_each_entry(vq, &vdev->vqs, list) + list_for_each_entry(vq, &(vdev)->vqs, list) /** * struct virtio_driver - operations for a virtio I/O driver diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h index 36fb3edfa403..0c67543a45c8 100644 --- a/include/linux/virtio_vsock.h +++ b/include/linux/virtio_vsock.h @@ -47,31 +47,50 @@ static inline void virtio_vsock_skb_clear_tap_delivered(struct sk_buff *skb) VIRTIO_VSOCK_SKB_CB(skb)->tap_delivered = false; } -static inline void virtio_vsock_skb_rx_put(struct sk_buff *skb) +static inline void virtio_vsock_skb_put(struct sk_buff *skb, u32 len) { - u32 len; + DEBUG_NET_WARN_ON_ONCE(skb->len); - len = le32_to_cpu(virtio_vsock_hdr(skb)->len); - - if (len > 0) + if (skb_is_nonlinear(skb)) + skb->len = len; + else skb_put(skb, len); } -static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask) +static inline struct sk_buff * +__virtio_vsock_alloc_skb_with_frags(unsigned int header_len, + unsigned int data_len, + gfp_t mask) { struct sk_buff *skb; + int err; - if (size < VIRTIO_VSOCK_SKB_HEADROOM) - return NULL; - - skb = alloc_skb(size, mask); + skb = alloc_skb_with_frags(header_len, data_len, + PAGE_ALLOC_COSTLY_ORDER, &err, mask); if (!skb) return NULL; skb_reserve(skb, VIRTIO_VSOCK_SKB_HEADROOM); + skb->data_len = data_len; return skb; } +static inline struct sk_buff * +virtio_vsock_alloc_linear_skb(unsigned int size, gfp_t mask) +{ + return __virtio_vsock_alloc_skb_with_frags(size, 0, mask); +} + +static inline struct sk_buff *virtio_vsock_alloc_skb(unsigned int size, gfp_t mask) +{ + if (size <= SKB_WITH_OVERHEAD(PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) + return virtio_vsock_alloc_linear_skb(size, mask); + + size -= VIRTIO_VSOCK_SKB_HEADROOM; + return 
__virtio_vsock_alloc_skb_with_frags(VIRTIO_VSOCK_SKB_HEADROOM, + size, mask); +} + static inline void virtio_vsock_skb_queue_head(struct sk_buff_head *list, struct sk_buff *skb) { @@ -111,7 +130,12 @@ static inline size_t virtio_vsock_skb_len(struct sk_buff *skb) return (size_t)(skb_end_pointer(skb) - skb->head); } -#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4) +/* Dimension the RX SKB so that the entire thing fits exactly into + * a single 4KiB page. This avoids wasting memory due to alloc_skb() + * rounding up to the next page order and also means that we + * don't leave higher-order pages sitting around in the RX queue. + */ +#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE SKB_WITH_OVERHEAD(1024 * 4) #define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL #define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64) diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index b2ccb6845595..c287998908bf 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -507,7 +507,7 @@ static inline const char *lru_list_name(enum lru_list lru) return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_" } -#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG) +#if defined(CONFIG_VM_EVENT_COUNTERS) static inline const char *vm_event_name(enum vm_event_item item) { return vmstat_text[NR_VM_ZONE_STAT_ITEMS + @@ -516,7 +516,7 @@ static inline const char *vm_event_name(enum vm_event_item item) NR_VM_STAT_ITEMS + item]; } -#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */ +#endif /* CONFIG_VM_EVENT_COUNTERS */ #ifdef CONFIG_MEMCG diff --git a/include/linux/vringh.h b/include/linux/vringh.h index c3a8117dabe8..49e7cbc9697a 100644 --- a/include/linux/vringh.h +++ b/include/linux/vringh.h @@ -175,9 +175,6 @@ int vringh_complete_multi_user(struct vringh *vrh, const struct vring_used_elem used[], unsigned num_used); -/* Pretend we've never seen descriptor (for easy error handling). */ -void vringh_abandon_user(struct vringh *vrh, unsigned int num); - /* Do we need to fire the eventfd to notify the other side? 
*/ int vringh_need_notify_user(struct vringh *vrh); @@ -235,10 +232,6 @@ int vringh_getdesc_kern(struct vringh *vrh, u16 *head, gfp_t gfp); -ssize_t vringh_iov_pull_kern(struct vringh_kiov *riov, void *dst, size_t len); -ssize_t vringh_iov_push_kern(struct vringh_kiov *wiov, - const void *src, size_t len); -void vringh_abandon_kern(struct vringh *vrh, unsigned int num); int vringh_complete_kern(struct vringh *vrh, u16 head, u32 len); bool vringh_notify_enable_kern(struct vringh *vrh); @@ -319,13 +312,8 @@ ssize_t vringh_iov_push_iotlb(struct vringh *vrh, struct vringh_kiov *wiov, const void *src, size_t len); -void vringh_abandon_iotlb(struct vringh *vrh, unsigned int num); - int vringh_complete_iotlb(struct vringh *vrh, u16 head, u32 len); -bool vringh_notify_enable_iotlb(struct vringh *vrh); -void vringh_notify_disable_iotlb(struct vringh *vrh); - int vringh_need_notify_iotlb(struct vringh *vrh); #endif /* CONFIG_VHOST_IOTLB */ diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h index 99660197a36c..8c60687a3e55 100644 --- a/include/linux/watchdog.h +++ b/include/linux/watchdog.h @@ -9,14 +9,18 @@ #ifndef _LINUX_WATCHDOG_H #define _LINUX_WATCHDOG_H - #include <linux/bitops.h> -#include <linux/cdev.h> -#include <linux/device.h> -#include <linux/kernel.h> +#include <linux/limits.h> #include <linux/notifier.h> +#include <linux/printk.h> +#include <linux/types.h> + #include <uapi/linux/watchdog.h> +struct attribute_group; +struct device; +struct module; + struct watchdog_ops; struct watchdog_device; struct watchdog_core_data; diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 6e30f275da77..45d5dd470ff6 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h @@ -6,6 +6,7 @@ #ifndef _LINUX_WORKQUEUE_H #define _LINUX_WORKQUEUE_H +#include <linux/alloc_tag.h> #include <linux/timer.h> #include <linux/linkage.h> #include <linux/bitops.h> @@ -401,6 +402,7 @@ enum wq_flags { * http://thread.gmane.org/gmane.linux.kernel/1480396 */ WQ_POWER_EFFICIENT = 1 << 7, + WQ_PERCPU = 1 << 8, /* bound to a specific cpu */ __WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */ __WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */ @@ -427,7 +429,7 @@ enum wq_consts { /* * System-wide workqueues which are always present. * - * system_wq is the one used by schedule[_delayed]_work[_on](). + * system_percpu_wq is the one used by schedule[_delayed]_work[_on](). * Multi-CPU multi-threaded. There are users which expect relatively * short queue flush time. Don't queue works which can run for too * long. @@ -438,7 +440,7 @@ enum wq_consts { * system_long_wq is similar to system_wq but may host long running * works. Queue flushing might take relatively long. * - * system_unbound_wq is unbound workqueue. Workers are not bound to + * system_dfl_wq is unbound workqueue. Workers are not bound to * any specific CPU, not concurrency managed, and all queued works are * executed immediately as long as max_active limit is not reached and * resources are available. @@ -455,10 +457,12 @@ enum wq_consts { * system_bh[_highpri]_wq are convenience interface to softirq. BH work items * are executed in the queueing CPU's BH context in the queueing order. 
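In terms of usage, the renamed queue symbols below behave exactly like their predecessors; illustrative call sites (my_work, my_unbound_work and "my_wq" are hypothetical):

	/* per-CPU work, what schedule_work() uses internally */
	queue_work(system_percpu_wq, &my_work);

	/* unbound work, runs on any CPU */
	queue_work(system_dfl_wq, &my_unbound_work);

	/* new allocations can request per-CPU placement explicitly */
	wq = alloc_workqueue("my_wq", WQ_PERCPU, 0);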
*/ -extern struct workqueue_struct *system_wq; +extern struct workqueue_struct *system_wq; /* use system_percpu_wq, this will be removed */ +extern struct workqueue_struct *system_percpu_wq; extern struct workqueue_struct *system_highpri_wq; extern struct workqueue_struct *system_long_wq; extern struct workqueue_struct *system_unbound_wq; +extern struct workqueue_struct *system_dfl_wq; extern struct workqueue_struct *system_freezable_wq; extern struct workqueue_struct *system_power_efficient_wq; extern struct workqueue_struct *system_freezable_power_efficient_wq; @@ -505,7 +509,8 @@ void workqueue_softirq_dead(unsigned int cpu); * Pointer to the allocated workqueue on success, %NULL on failure. */ __printf(1, 4) struct workqueue_struct * -alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...); +alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...); +#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__)) #ifdef CONFIG_LOCKDEP /** @@ -544,8 +549,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active, * Pointer to the allocated workqueue on success, %NULL on failure. */ #define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \ - alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \ - 1, lockdep_map, ##args) + alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\ + 1, lockdep_map, ##args)) #endif /** @@ -577,7 +582,9 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active, extern void destroy_workqueue(struct workqueue_struct *wq); -struct workqueue_attrs *alloc_workqueue_attrs(void); +struct workqueue_attrs *alloc_workqueue_attrs_noprof(void); +#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__)) + void free_workqueue_attrs(struct workqueue_attrs *attrs); int apply_workqueue_attrs(struct workqueue_struct *wq, const struct workqueue_attrs *attrs); @@ -840,19 +847,6 @@ long work_on_cpu_key(int cpu, long (*fn)(void *), work_on_cpu_key(_cpu, _fn, _arg, &__key); \ }) -long work_on_cpu_safe_key(int cpu, long (*fn)(void *), - void *arg, struct lock_class_key *key); - -/* - * A new key is defined for each caller to make sure the work - * associated with the function doesn't share its locking class. - */ -#define work_on_cpu_safe(_cpu, _fn, _arg) \ -({ \ - static struct lock_class_key __key; \ - \ - work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \ -}) #endif /* CONFIG_SMP */ #ifdef CONFIG_FREEZER diff --git a/include/linux/writeback.h b/include/linux/writeback.h index eda4b62511f7..a2848d731a46 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -59,7 +59,6 @@ struct writeback_control { unsigned for_kupdate:1; /* A kupdate writeback */ unsigned for_background:1; /* A background writeback */ unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ - unsigned for_reclaim:1; /* Invoked from the page allocator */ unsigned range_cyclic:1; /* range_start is cyclic */ unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */ unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */ @@ -72,16 +71,6 @@ struct writeback_control { */ unsigned no_cgroup_owner:1; - /* To enable batching of swap writes to non-block-device backends, - * "plug" can be set point to a 'struct swap_iocb *'. When all swap - * writes have been submitted, if with swap_iocb is not NULL, - * swap_write_unplug() should be called. 
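/*
 * Example: what the alloc_hooks() wrapping above means for a caller,
 * assuming the usual memory-allocation-profiling convention: the macro
 * expands to the _noprof variant plus a codetag that attributes the
 * allocation to this call site. Sketch only; the workqueue name is a
 * placeholder.
 */
static int example_create_wq(void)
{
	struct workqueue_struct *wq;

	/* expands to alloc_hooks(alloc_workqueue_noprof("example_wq", ...)) */
	wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
	if (!wq)
		return -ENOMEM;

	destroy_workqueue(wq);
	return 0;
}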
- */ - struct swap_iocb **swap_plug; - - /* Target list for splitting a large folio */ - struct list_head *list; - /* internal fields used by the ->writepages implementation: */ struct folio_batch fbatch; pgoff_t index; diff --git a/include/linux/xxhash.h b/include/linux/xxhash.h index df42511438d0..27f57eca8cb1 100644 --- a/include/linux/xxhash.h +++ b/include/linux/xxhash.h @@ -178,32 +178,6 @@ struct xxh64_state { void xxh32_reset(struct xxh32_state *state, uint32_t seed); /** - * xxh32_update() - hash the data given and update the xxh32 state - * - * @state: The xxh32 state to update. - * @input: The data to hash. - * @length: The length of the data to hash. - * - * After calling xxh32_reset() call xxh32_update() as many times as necessary. - * - * Return: Zero on success, otherwise an error code. - */ -int xxh32_update(struct xxh32_state *state, const void *input, size_t length); - -/** - * xxh32_digest() - produce the current xxh32 hash - * - * @state: Produce the current xxh32 hash of this state. - * - * A hash value can be produced at any time. It is still possible to continue - * inserting input into the hash state after a call to xxh32_digest(), and - * generate new hashes later on, by calling xxh32_digest() again. - * - * Return: The xxh32 hash stored in the state. - */ -uint32_t xxh32_digest(const struct xxh32_state *state); - -/** * xxh64_reset() - reset the xxh64 state to start a new hashing operation * * @state: The xxh64 state to reset. diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h index 13e9cc5490f7..f3ccff2d966c 100644 --- a/include/linux/zsmalloc.h +++ b/include/linux/zsmalloc.h @@ -46,4 +46,6 @@ void zs_obj_read_end(struct zs_pool *pool, unsigned long handle, void zs_obj_write(struct zs_pool *pool, unsigned long handle, void *handle_mem, size_t mem_len); +extern const struct movable_operations zsmalloc_mops; + #endif diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h index 179240fb163b..6ac9be9f675e 100644 --- a/include/media/rcar-fcp.h +++ b/include/media/rcar-fcp.h @@ -18,6 +18,7 @@ void rcar_fcp_put(struct rcar_fcp_device *fcp); struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp); int rcar_fcp_enable(struct rcar_fcp_device *fcp); void rcar_fcp_disable(struct rcar_fcp_device *fcp); +int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp); #else static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np) { @@ -33,6 +34,10 @@ static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp) return 0; } static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { } +static inline int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp) +{ + return 0; +} #endif #endif /* __MEDIA_RCAR_FCP_H__ */ diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h index 3a87096e064f..c32c46286441 100644 --- a/include/media/v4l2-ctrls.h +++ b/include/media/v4l2-ctrls.h @@ -579,8 +579,10 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl, * @hdl: The control handler. * * Does nothing if @hdl == NULL. + * + * Return: @hdl's error field or 0 if @hdl is NULL. 
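/*
 * Example: with v4l2_ctrl_handler_free() now returning an int, a probe
 * error path can free the handler and propagate its accumulated error
 * in one statement (hypothetical driver context):
 */
static int example_ctrls_done(struct v4l2_ctrl_handler *hdl)
{
	/* returns hdl->error after freeing, or 0 if hdl is NULL */
	if (hdl->error)
		return v4l2_ctrl_handler_free(hdl);
	return 0;
}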
*/ -void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl); +int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl); /** * v4l2_ctrl_lock() - Helper function to lock the handler diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h index 1b6222fab24e..a69801274800 100644 --- a/include/media/v4l2-dev.h +++ b/include/media/v4l2-dev.h @@ -313,10 +313,16 @@ struct video_device { * media_entity_to_video_device - Returns a &struct video_device from * the &struct media_entity embedded on it. * - * @__entity: pointer to &struct media_entity - */ -#define media_entity_to_video_device(__entity) \ - container_of(__entity, struct video_device, entity) + * @__entity: pointer to &struct media_entity, may be NULL + */ +#define media_entity_to_video_device(__entity) \ +({ \ + typeof(__entity) __me_vdev_ent = __entity; \ + \ + __me_vdev_ent ? \ + container_of(__me_vdev_ent, struct video_device, entity) : \ + NULL; \ +}) /** * to_video_device - Returns a &struct video_device from the diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h index c6ec87e88dfe..82695c3a300a 100644 --- a/include/media/v4l2-ioctl.h +++ b/include/media/v4l2-ioctl.h @@ -679,6 +679,7 @@ long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd, #endif unsigned int v4l2_compat_translate_cmd(unsigned int cmd); +unsigned int v4l2_translate_cmd(unsigned int cmd); int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd); int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd); int v4l2_compat_get_array_args(struct file *file, void *mbuf, diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h index b65658a02e3c..62dda1560275 100644 --- a/include/media/v4l2-jpeg.h +++ b/include/media/v4l2-jpeg.h @@ -169,15 +169,6 @@ struct v4l2_jpeg_header { int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out); -int v4l2_jpeg_parse_frame_header(void *buf, size_t len, - struct v4l2_jpeg_frame_header *frame_header); -int v4l2_jpeg_parse_scan_header(void *buf, size_t len, - struct v4l2_jpeg_scan_header *scan_header); -int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision, - struct v4l2_jpeg_reference *q_tables); -int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len, - struct v4l2_jpeg_reference *huffman_tables); - extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK]; extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK]; diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h index 57f2bcb4eb16..5dcf4065708f 100644 --- a/include/media/v4l2-subdev.h +++ b/include/media/v4l2-subdev.h @@ -460,8 +460,6 @@ enum v4l2_subdev_pre_streamon_flags { * but use the v4l2_subdev_enable_streams() and * v4l2_subdev_disable_streams() helpers. * - * @g_pixelaspect: callback to return the pixelaspect ratio. - * * @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev * can adjust @size to a lower value and must not write more data to the * buffer starting at @data than the original value of @size. 
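/*
 * Example: the reworked media_entity_to_video_device() above now
 * propagates NULL instead of handing a NULL entity to container_of(),
 * so callers can test the result directly (hypothetical caller):
 */
static struct video_device *example_lookup(struct media_entity *entity)
{
	struct video_device *vdev = media_entity_to_video_device(entity);

	if (!vdev)	/* entity was NULL */
		return NULL;

	return vdev;
}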
@@ -491,7 +489,6 @@ struct v4l2_subdev_video_ops { int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std); int (*g_input_status)(struct v4l2_subdev *sd, u32 *status); int (*s_stream)(struct v4l2_subdev *sd, int enable); - int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect); int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf, unsigned int *size); int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags); diff --git a/include/media/vsp1.h b/include/media/vsp1.h index 4ea6352fd63f..d9b91ff02761 100644 --- a/include/media/vsp1.h +++ b/include/media/vsp1.h @@ -14,6 +14,11 @@ #include <linux/videodev2.h> struct device; +struct vsp1_dl_list; + +/* ----------------------------------------------------------------------------- + * VSP1 DU interface + */ int vsp1_du_init(struct device *dev); @@ -121,4 +126,88 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index, int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt); void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt); +/* ----------------------------------------------------------------------------- + * VSP1 ISP interface + */ + +/** + * struct vsp1_isp_buffer_desc - Describe a buffer allocated by VSPX + * @size: Byte size of the buffer allocated by VSPX + * @cpu_addr: CPU-mapped address of a buffer allocated by VSPX + * @dma_addr: bus address of a buffer allocated by VSPX + */ +struct vsp1_isp_buffer_desc { + size_t size; + void *cpu_addr; + dma_addr_t dma_addr; +}; + +/** + * struct vsp1_isp_job_desc - Describe a VSPX buffer transfer request + * @config: ConfigDMA buffer descriptor + * @config.pairs: number of reg-value pairs in the ConfigDMA buffer + * @config.mem: bus address of the ConfigDMA buffer + * @img: RAW image buffer descriptor + * @img.fmt: RAW image format + * @img.mem: bus address of the RAW image buffer + * @dl: pointer to the display list populated by the VSPX driver in the + * vsp1_isp_job_prepare() function + * + * Describe a transfer request for the VSPX to perform on behalf of the ISP. + * The job descriptor contains an optional ConfigDMA buffer and one RAW image + * buffer. Set config.pairs to 0 if no ConfigDMA buffer should be transferred. + * The minimum number of config.pairs that can be written using ConfigDMA is 17. + * A number of pairs < 16 corrupts the output image. A number of pairs == 16 + * freezes the VSPX operation. If the ISP driver has to write fewer than 17 pairs + * it shall pad the buffer with writes directed to registers that have no effect + * or avoid using ConfigDMA at all for such small write sequences. + * + * The ISP driver shall pass an instance of this type to the vsp1_isp_job_prepare() + * function that will populate the display list pointer @dl using the @config + * and @img descriptors. When the job has to be run on the VSPX, the descriptor + * shall be passed to vsp1_isp_job_run() which consumes the display list. + * + * Job descriptors not yet run shall be released with a call to + * vsp1_isp_job_release() when stopping the streaming in order to properly + * release the resources acquired by vsp1_isp_job_prepare(). + */ +struct vsp1_isp_job_desc { + struct { + unsigned int pairs; + dma_addr_t mem; + } config; + struct { + struct v4l2_pix_format_mplane fmt; + dma_addr_t mem; + } img; + struct vsp1_dl_list *dl; +}; + +/** + * struct vsp1_vspx_frame_end - VSPX frame end callback data + * @vspx_frame_end: Frame end callback. Called after a transfer job has been + * completed.
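/*
 * Example: the documented VSPX job lifecycle, sketched for a
 * hypothetical ISP driver. vspx_dev is a placeholder and error
 * handling is trimmed; see the constraints on config.pairs above.
 */
static int example_run_vspx_job(struct device *vspx_dev,
				struct vsp1_isp_job_desc *job)
{
	int ret;

	ret = vsp1_isp_job_prepare(vspx_dev, job);	/* populates job->dl */
	if (ret)
		return ret;

	ret = vsp1_isp_job_run(vspx_dev, job);		/* consumes job->dl */
	if (ret)
		vsp1_isp_job_release(vspx_dev, job);	/* job never ran */

	return ret;
}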
If the job includes both a ConfigDMA and a + * RAW image, the callback is called after both have been + * transferred + * @frame_end_data: Frame end callback data, passed to vspx_frame_end + */ +struct vsp1_vspx_frame_end { + void (*vspx_frame_end)(void *data); + void *frame_end_data; +}; + +int vsp1_isp_init(struct device *dev); +struct device *vsp1_isp_get_bus_master(struct device *dev); +int vsp1_isp_alloc_buffer(struct device *dev, size_t size, + struct vsp1_isp_buffer_desc *buffer_desc); +void vsp1_isp_free_buffer(struct device *dev, + struct vsp1_isp_buffer_desc *buffer_desc); +int vsp1_isp_start_streaming(struct device *dev, + struct vsp1_vspx_frame_end *frame_end); +void vsp1_isp_stop_streaming(struct device *dev); +int vsp1_isp_job_prepare(struct device *dev, + struct vsp1_isp_job_desc *job); +int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job); +void vsp1_isp_job_release(struct device *dev, struct vsp1_isp_job_desc *job); + #endif /* __MEDIA_VSP1_H__ */ diff --git a/include/net/dst.h b/include/net/dst.h index 00467c1b5093..bab01363bb97 100644 --- a/include/net/dst.h +++ b/include/net/dst.h @@ -568,11 +568,23 @@ static inline struct net_device *dst_dev(const struct dst_entry *dst) return READ_ONCE(dst->dev); } +static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst) +{ + /* In the future, use rcu_dereference(dst->dev) */ + WARN_ON_ONCE(!rcu_read_lock_held()); + return READ_ONCE(dst->dev); +} + static inline struct net_device *skb_dst_dev(const struct sk_buff *skb) { return dst_dev(skb_dst(skb)); } +static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb) +{ + return dst_dev_rcu(skb_dst(skb)); +} + static inline struct net *skb_dst_dev_net(const struct sk_buff *skb) { return dev_net(skb_dst_dev(skb)); diff --git a/include/net/tcp.h b/include/net/tcp.h index b3815d104340..526a26e7a150 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -2655,8 +2655,8 @@ void tcp_update_ulp(struct sock *sk, struct proto *p, void (*write_space)(struct sock *sk)); #define MODULE_ALIAS_TCP_ULP(name) \ - __MODULE_INFO(alias, alias_userspace, name); \ - __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name) + MODULE_INFO(alias, name); \ + MODULE_INFO(alias, "tcp-ulp-" name) #ifdef CONFIG_NET_SOCK_MSG struct sk_msg; diff --git a/include/net/udp.h b/include/net/udp.h index f8ae2c4ade14..e2af3bda90c9 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -586,6 +586,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, { netdev_features_t features = NETIF_F_SG; struct sk_buff *segs; + int drop_count; + + /* + * Segmentation in UDP receive path is only for UDP GRO, drop udp + * fragmentation offload (UFO) packets. 
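/*
 * Example: usage sketch for the new RCU-flavoured dst accessors above.
 * The caller holds rcu_read_lock() around any use of the returned
 * device, which the WARN_ON_ONCE() asserts. Assumes skb carries a dst.
 */
static unsigned int example_dst_mtu(const struct sk_buff *skb)
{
	unsigned int mtu;

	rcu_read_lock();
	mtu = READ_ONCE(skb_dst_dev_rcu(skb)->mtu);
	rcu_read_unlock();

	return mtu;
}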
+ */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) { + drop_count = 1; + goto drop; + } /* Avoid csum recalculation by skb_segment unless userspace explicitly * asks for the final checksum values @@ -609,16 +619,18 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk, */ segs = __skb_gso_segment(skb, features, false); if (IS_ERR_OR_NULL(segs)) { - int segs_nr = skb_shinfo(skb)->gso_segs; - - atomic_add(segs_nr, &sk->sk_drops); - SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, segs_nr); - kfree_skb(skb); - return NULL; + drop_count = skb_shinfo(skb)->gso_segs; + goto drop; } consume_skb(skb); return segs; + +drop: + atomic_add(drop_count, &sk->sk_drops); + SNMP_ADD_STATS(__UDPX_MIB(sk, ipv4), UDP_MIB_INERRORS, drop_count); + kfree_skb(skb); + return NULL; } static inline void udp_post_segment_fix_csum(struct sk_buff *skb) diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 7dc7b1cc71b5..0a8e092c0ea8 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -52,11 +52,15 @@ static inline int ib_umem_offset(struct ib_umem *umem) return umem->address & ~PAGE_MASK; } +static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem) +{ + return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem); +} + static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem, unsigned long pgsz) { - return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) & - (pgsz - 1); + return ib_umem_start_dma_addr(umem) & (pgsz - 1); } static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem, @@ -135,14 +139,27 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem, unsigned long pgsz_bitmap, u64 pgoff_bitmask) { - struct scatterlist *sg = umem->sgt_append.sgt.sgl; dma_addr_t dma_addr; - dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK); + dma_addr = ib_umem_start_dma_addr(umem); return ib_umem_find_best_pgsz(umem, pgsz_bitmap, dma_addr & pgoff_bitmask); } +static inline bool ib_umem_is_contiguous(struct ib_umem *umem) +{ + dma_addr_t dma_addr; + unsigned long pgsz; + + /* + * Select the smallest aligned page that can contain the whole umem if + * it was contiguous. 
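/*
 * Example: the arithmetic behind the ib_umem contiguity check being
 * added here. XOR of the first and last byte addresses exposes the
 * highest differing bit; rounding up to a power of two yields the
 * smallest aligned "page" covering the whole range. If the umem can be
 * expressed with a single page of that size, the mapping must be
 * physically contiguous.
 */
static unsigned long example_min_span_pgsz(dma_addr_t dma_addr, size_t length)
{
	dma_addr_t first = dma_addr;
	dma_addr_t last = dma_addr + length - 1;

	/* e.g. first = 0x1000, length = 0x3000: last = 0x3fff,
	 * first ^ last = 0x2fff, so the span rounds up to 0x4000 */
	return roundup_pow_of_two((first ^ last) + 1);
}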
+ */ + dma_addr = ib_umem_start_dma_addr(umem); + pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1); + return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX); +} + struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device, unsigned long offset, size_t size, int fd, int access, diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index af43a8d2a74a..6139223e92e4 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -42,6 +42,7 @@ #include <rdma/signature.h> #include <uapi/rdma/rdma_user_ioctl.h> #include <uapi/rdma/ib_user_ioctl_verbs.h> +#include <linux/pci-tph.h> #define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN @@ -1846,6 +1847,27 @@ struct ib_dm { atomic_t usecnt; }; +/* bit values to mark existence of ib_dmah fields */ +enum { + IB_DMAH_CPU_ID_EXISTS, + IB_DMAH_MEM_TYPE_EXISTS, + IB_DMAH_PH_EXISTS, +}; + +struct ib_dmah { + struct ib_device *device; + struct ib_uobject *uobject; + /* + * Implementation details of the RDMA core, don't use in drivers: + */ + struct rdma_restrack_entry res; + u32 cpu_id; + enum tph_mem_type mem_type; + atomic_t usecnt; + u8 ph; + u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */ +}; + struct ib_mr { struct ib_device *device; struct ib_pd *pd; @@ -1863,6 +1885,7 @@ struct ib_mr { struct ib_dm *dm; struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */ + struct ib_dmah *dmah; /* * Implementation details of the RDMA core, don't use in drivers: */ @@ -2486,16 +2509,31 @@ struct ib_device_ops { int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata); int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr, struct uverbs_attr_bundle *attrs); + int (*create_cq_umem)(struct ib_cq *cq, + const struct ib_cq_init_attr *attr, + struct ib_umem *umem, + struct uverbs_attr_bundle *attrs); int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period); int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata); int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata); + /** + * pre_destroy_cq - Prevent a cq from generating any new work + * completions, but not free any kernel resources + */ + int (*pre_destroy_cq)(struct ib_cq *cq); + /** + * post_destroy_cq - Free all kernel resources + */ + void (*post_destroy_cq)(struct ib_cq *cq); struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags); struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length, u64 virt_addr, int mr_access_flags, + struct ib_dmah *dmah, struct ib_udata *udata); struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset, u64 length, u64 virt_addr, int fd, int mr_access_flags, + struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs); struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start, u64 length, u64 virt_addr, @@ -2560,6 +2598,9 @@ struct ib_device_ops { struct ib_dm_alloc_attr *attr, struct uverbs_attr_bundle *attrs); int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs); + int (*alloc_dmah)(struct ib_dmah *ibdmah, + struct uverbs_attr_bundle *attrs); + int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs); struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm, struct ib_dm_mr_attr *attr, struct uverbs_attr_bundle *attrs); @@ -2717,6 +2758,7 @@ struct ib_device_ops { DECLARE_RDMA_OBJ_SIZE(ib_ah); DECLARE_RDMA_OBJ_SIZE(ib_counters); DECLARE_RDMA_OBJ_SIZE(ib_cq); + DECLARE_RDMA_OBJ_SIZE(ib_dmah); DECLARE_RDMA_OBJ_SIZE(ib_mw); DECLARE_RDMA_OBJ_SIZE(ib_pd); DECLARE_RDMA_OBJ_SIZE(ib_qp); @@ -2905,11 +2947,18 @@ 
struct ib_block_iter { unsigned int __pg_bit; /* alignment of current block */ }; -struct ib_device *_ib_alloc_device(size_t size); +struct ib_device *_ib_alloc_device(size_t size, struct net *net); #define ib_alloc_device(drv_struct, member) \ container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ BUILD_BUG_ON_ZERO(offsetof( \ - struct drv_struct, member))), \ + struct drv_struct, member)), \ + &init_net), \ struct drv_struct, member) + +#define ib_alloc_device_with_net(drv_struct, member, net) \ + container_of(_ib_alloc_device(sizeof(struct drv_struct) + \ + BUILD_BUG_ON_ZERO(offsetof( \ + struct drv_struct, member)), net), \ struct drv_struct, member) void ib_dealloc_device(struct ib_device *device); @@ -4794,11 +4843,17 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile); #if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs); +bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs); #else static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs) { return 0; } +static inline bool +rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs) +{ + return false; +} #endif struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num, @@ -4855,6 +4910,12 @@ static inline int ibdev_to_node(struct ib_device *ibdev) bool rdma_dev_access_netns(const struct ib_device *device, const struct net *net); +bool rdma_dev_has_raw_cap(const struct ib_device *dev); +static inline struct net *rdma_dev_net(struct ib_device *device) +{ + return read_pnet(&device->coredev.rdma_net); +} + #define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000) #define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF) #define IB_GRH_FLOWLABEL_MASK (0x000FFFFF) diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h index 0d69ded73bf2..8a9bcf77dace 100644 --- a/include/rdma/restrack.h +++ b/include/rdma/restrack.h @@ -57,6 +57,10 @@ enum rdma_restrack_type { */ RDMA_RESTRACK_SRQ, /** + * @RDMA_RESTRACK_DMAH: DMA handle + */ + RDMA_RESTRACK_DMAH, + /** * @RDMA_RESTRACK_MAX: Last entry, used for array declarations */ RDMA_RESTRACK_MAX diff --git a/include/scsi/sas_ata.h b/include/scsi/sas_ata.h index 92e27e7bf088..a161c0222931 100644 --- a/include/scsi/sas_ata.h +++ b/include/scsi/sas_ata.h @@ -15,89 +15,37 @@ #ifdef CONFIG_SCSI_SAS_ATA -static inline int dev_is_sata(struct domain_device *dev) +static inline bool dev_is_sata(struct domain_device *dev) { - return dev->dev_type == SAS_SATA_DEV || dev->dev_type == SAS_SATA_PM || - dev->dev_type == SAS_SATA_PM_PORT || dev->dev_type == SAS_SATA_PENDING; + switch (dev->dev_type) { + case SAS_SATA_DEV: + case SAS_SATA_PENDING: + case SAS_SATA_PM: + case SAS_SATA_PM_PORT: + return true; + default: + return false; + } } -int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy); -int sas_ata_init(struct domain_device *dev); -void sas_ata_task_abort(struct sas_task *task); -void sas_ata_strategy_handler(struct Scsi_Host *shost); -void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q); void sas_ata_schedule_reset(struct domain_device *dev); -void sas_ata_wait_eh(struct domain_device *dev); -void sas_probe_sata(struct asd_sas_port *port); -void sas_suspend_sata(struct asd_sas_port *port); -void sas_resume_sata(struct asd_sas_port *port); -void sas_ata_end_eh(struct ata_port *ap); void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset); -int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, - int 
force_phy_id); +int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id); int smp_ata_check_ready_type(struct ata_link *link); -int sas_discover_sata(struct domain_device *dev); -int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, - struct domain_device *child, int phy_id); extern const struct attribute_group sas_ata_sdev_attr_group; #else -static inline void sas_ata_disabled_notice(void) -{ - pr_notice_once("ATA device seen but CONFIG_SCSI_SAS_ATA=N\n"); -} - -static inline int dev_is_sata(struct domain_device *dev) -{ - return 0; -} -static inline int sas_ata_init(struct domain_device *dev) -{ - return 0; -} -static inline void sas_ata_task_abort(struct sas_task *task) -{ -} - -static inline void sas_ata_strategy_handler(struct Scsi_Host *shost) -{ -} - -static inline void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q) +static inline bool dev_is_sata(struct domain_device *dev) { + return false; } static inline void sas_ata_schedule_reset(struct domain_device *dev) { } -static inline void sas_ata_wait_eh(struct domain_device *dev) -{ -} - -static inline void sas_probe_sata(struct asd_sas_port *port) -{ -} - -static inline void sas_suspend_sata(struct asd_sas_port *port) -{ -} - -static inline void sas_resume_sata(struct asd_sas_port *port) -{ -} - -static inline int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) -{ - return 0; -} - -static inline void sas_ata_end_eh(struct ata_port *ap) -{ -} - static inline void sas_ata_device_link_abort(struct domain_device *dev, bool force_reset) { @@ -114,19 +62,6 @@ static inline int smp_ata_check_ready_type(struct ata_link *link) return 0; } -static inline int sas_discover_sata(struct domain_device *dev) -{ - sas_ata_disabled_notice(); - return -ENXIO; -} - -static inline int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy, - struct domain_device *child, int phy_id) -{ - sas_ata_disabled_notice(); - return -ENODEV; -} - #define sas_ata_sdev_attr_group ((struct attribute_group) {}) #endif diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 68dd49947d04..6d6500148c4b 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -184,6 +184,11 @@ struct scsi_device { */ unsigned force_runtime_start_on_system_start:1; + /* + * Set if the device is an ATA device. 
+ */ + unsigned is_ata:1; + unsigned removable:1; unsigned changed:1; /* Data invalid due to media change */ unsigned busy:1; /* Used to prevent races */ diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index d02b55261307..b908aacfef48 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h @@ -383,6 +383,8 @@ struct fc_rport { /* aka fc_starget_attrs */ struct work_struct stgt_delete_work; struct work_struct rport_delete_work; struct request_queue *rqst_q; /* bsg support */ + + struct workqueue_struct *devloss_work_q; } __attribute__((aligned(sizeof(unsigned long)))); /* bit field values for struct fc_rport "flags" field: */ @@ -576,7 +578,6 @@ struct fc_host_attrs { /* work queues for rport state manipulation */ struct workqueue_struct *work_q; - struct workqueue_struct *devloss_work_q; /* bsg support */ struct request_queue *rqst_q; @@ -654,8 +655,6 @@ struct fc_host_attrs { (((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse) #define fc_host_work_q(x) \ (((struct fc_host_attrs *)(x)->shost_data)->work_q) -#define fc_host_devloss_work_q(x) \ - (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) #define fc_host_dev_loss_tmo(x) \ (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo) #define fc_host_max_ct_payload(x) \ diff --git a/include/soc/spacemit/k1-syscon.h b/include/soc/spacemit/k1-syscon.h new file mode 100644 index 000000000000..c59bd7a38e5b --- /dev/null +++ b/include/soc/spacemit/k1-syscon.h @@ -0,0 +1,160 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +/* SpacemiT clock and reset driver definitions for the K1 SoC */ + +#ifndef __SOC_K1_SYSCON_H__ +#define __SOC_K1_SYSCON_H__ + +/* Auxiliary device used to represent a CCU reset controller */ +struct spacemit_ccu_adev { + struct auxiliary_device adev; + struct regmap *regmap; +}; + +static inline struct spacemit_ccu_adev * +to_spacemit_ccu_adev(struct auxiliary_device *adev) +{ + return container_of(adev, struct spacemit_ccu_adev, adev); +} + +/* APBS register offset */ +#define APBS_PLL1_SWCR1 0x100 +#define APBS_PLL1_SWCR2 0x104 +#define APBS_PLL1_SWCR3 0x108 +#define APBS_PLL2_SWCR1 0x118 +#define APBS_PLL2_SWCR2 0x11c +#define APBS_PLL2_SWCR3 0x120 +#define APBS_PLL3_SWCR1 0x124 +#define APBS_PLL3_SWCR2 0x128 +#define APBS_PLL3_SWCR3 0x12c + +/* MPMU register offset */ +#define MPMU_POSR 0x0010 +#define POSR_PLL1_LOCK BIT(27) +#define POSR_PLL2_LOCK BIT(28) +#define POSR_PLL3_LOCK BIT(29) +#define MPMU_SUCCR 0x0014 +#define MPMU_ISCCR 0x0044 +#define MPMU_WDTPCR 0x0200 +#define MPMU_RIPCCR 0x0210 +#define MPMU_ACGR 0x1024 +#define MPMU_APBCSCR 0x1050 +#define MPMU_SUCCR_1 0x10b0 + +/* APBC register offset */ +#define APBC_UART1_CLK_RST 0x00 +#define APBC_UART2_CLK_RST 0x04 +#define APBC_GPIO_CLK_RST 0x08 +#define APBC_PWM0_CLK_RST 0x0c +#define APBC_PWM1_CLK_RST 0x10 +#define APBC_PWM2_CLK_RST 0x14 +#define APBC_PWM3_CLK_RST 0x18 +#define APBC_TWSI8_CLK_RST 0x20 +#define APBC_UART3_CLK_RST 0x24 +#define APBC_RTC_CLK_RST 0x28 +#define APBC_TWSI0_CLK_RST 0x2c +#define APBC_TWSI1_CLK_RST 0x30 +#define APBC_TIMERS1_CLK_RST 0x34 +#define APBC_TWSI2_CLK_RST 0x38 +#define APBC_AIB_CLK_RST 0x3c +#define APBC_TWSI4_CLK_RST 0x40 +#define APBC_TIMERS2_CLK_RST 0x44 +#define APBC_ONEWIRE_CLK_RST 0x48 +#define APBC_TWSI5_CLK_RST 0x4c +#define APBC_DRO_CLK_RST 0x58 +#define APBC_IR_CLK_RST 0x5c +#define APBC_TWSI6_CLK_RST 0x60 +#define APBC_COUNTER_CLK_SEL 0x64 +#define APBC_TWSI7_CLK_RST 0x68 +#define APBC_TSEN_CLK_RST 0x6c +#define APBC_UART4_CLK_RST 
0x70 +#define APBC_UART5_CLK_RST 0x74 +#define APBC_UART6_CLK_RST 0x78 +#define APBC_SSP3_CLK_RST 0x7c +#define APBC_SSPA0_CLK_RST 0x80 +#define APBC_SSPA1_CLK_RST 0x84 +#define APBC_IPC_AP2AUD_CLK_RST 0x90 +#define APBC_UART7_CLK_RST 0x94 +#define APBC_UART8_CLK_RST 0x98 +#define APBC_UART9_CLK_RST 0x9c +#define APBC_CAN0_CLK_RST 0xa0 +#define APBC_PWM4_CLK_RST 0xa8 +#define APBC_PWM5_CLK_RST 0xac +#define APBC_PWM6_CLK_RST 0xb0 +#define APBC_PWM7_CLK_RST 0xb4 +#define APBC_PWM8_CLK_RST 0xb8 +#define APBC_PWM9_CLK_RST 0xbc +#define APBC_PWM10_CLK_RST 0xc0 +#define APBC_PWM11_CLK_RST 0xc4 +#define APBC_PWM12_CLK_RST 0xc8 +#define APBC_PWM13_CLK_RST 0xcc +#define APBC_PWM14_CLK_RST 0xd0 +#define APBC_PWM15_CLK_RST 0xd4 +#define APBC_PWM16_CLK_RST 0xd8 +#define APBC_PWM17_CLK_RST 0xdc +#define APBC_PWM18_CLK_RST 0xe0 +#define APBC_PWM19_CLK_RST 0xe4 + +/* APMU register offset */ +#define APMU_JPG_CLK_RES_CTRL 0x020 +#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024 +#define APMU_ISP_CLK_RES_CTRL 0x038 +#define APMU_LCD_CLK_RES_CTRL1 0x044 +#define APMU_LCD_SPI_CLK_RES_CTRL 0x048 +#define APMU_LCD_CLK_RES_CTRL2 0x04c +#define APMU_CCIC_CLK_RES_CTRL 0x050 +#define APMU_SDH0_CLK_RES_CTRL 0x054 +#define APMU_SDH1_CLK_RES_CTRL 0x058 +#define APMU_USB_CLK_RES_CTRL 0x05c +#define APMU_QSPI_CLK_RES_CTRL 0x060 +#define APMU_DMA_CLK_RES_CTRL 0x064 +#define APMU_AES_CLK_RES_CTRL 0x068 +#define APMU_VPU_CLK_RES_CTRL 0x0a4 +#define APMU_GPU_CLK_RES_CTRL 0x0cc +#define APMU_SDH2_CLK_RES_CTRL 0x0e0 +#define APMU_PMUA_MC_CTRL 0x0e8 +#define APMU_PMU_CC2_AP 0x100 +#define APMU_PMUA_EM_CLK_RES_CTRL 0x104 +#define APMU_AUDIO_CLK_RES_CTRL 0x14c +#define APMU_HDMI_CLK_RES_CTRL 0x1b8 +#define APMU_CCI550_CLK_CTRL 0x300 +#define APMU_ACLK_CLK_CTRL 0x388 +#define APMU_CPU_C0_CLK_CTRL 0x38C +#define APMU_CPU_C1_CLK_CTRL 0x390 +#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc +#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4 +#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc +#define APMU_EMAC0_CLK_RES_CTRL 0x3e4 +#define APMU_EMAC1_CLK_RES_CTRL 0x3ec + +/* RCPU register offsets */ +#define RCPU_SSP0_CLK_RST 0x0028 +#define RCPU_I2C0_CLK_RST 0x0030 +#define RCPU_UART1_CLK_RST 0x003c +#define RCPU_CAN_CLK_RST 0x0048 +#define RCPU_IR_CLK_RST 0x004c +#define RCPU_UART0_CLK_RST 0x00d8 +#define AUDIO_HDMI_CLK_CTRL 0x2044 + +/* RCPU2 register offsets */ +#define RCPU2_PWM0_CLK_RST 0x0000 +#define RCPU2_PWM1_CLK_RST 0x0004 +#define RCPU2_PWM2_CLK_RST 0x0008 +#define RCPU2_PWM3_CLK_RST 0x000c +#define RCPU2_PWM4_CLK_RST 0x0010 +#define RCPU2_PWM5_CLK_RST 0x0014 +#define RCPU2_PWM6_CLK_RST 0x0018 +#define RCPU2_PWM7_CLK_RST 0x001c +#define RCPU2_PWM8_CLK_RST 0x0020 +#define RCPU2_PWM9_CLK_RST 0x0024 + +/* APBC2 register offsets */ +#define APBC2_UART1_CLK_RST 0x0000 +#define APBC2_SSP2_CLK_RST 0x0004 +#define APBC2_TWSI3_CLK_RST 0x0008 +#define APBC2_RTC_CLK_RST 0x000c +#define APBC2_TIMERS0_CLK_RST 0x0010 +#define APBC2_KPC_CLK_RST 0x0014 +#define APBC2_GPIO_CLK_RST 0x001c + +#endif /* __SOC_K1_SYSCON_H__ */ diff --git a/include/sound/sdca_function.h b/include/sound/sdca_function.h index 90d77fc46416..06ec126cdcc3 100644 --- a/include/sound/sdca_function.h +++ b/include/sound/sdca_function.h @@ -742,14 +742,14 @@ struct sdca_control_range { * struct sdca_control - information for one SDCA Control * @label: Name for the Control, from SDCA Specification v1.0, section 7.1.7. * @sel: Identifier used for addressing. - * @value: Holds the Control value for constants and defaults. * @nbits: Number of bits used in the Control. 
- * @interrupt_position: SCDA interrupt line that will alert to changes on this - * Control. + * @values: Holds the Control value for constants and defaults. * @cn_list: A bitmask showing the valid Control Numbers within this Control, * Control Numbers typically represent channels. - * @range: Buffer describing valid range of values for the Control. + * @interrupt_position: SDCA interrupt line that will alert to changes on this + * Control. * @type: Format of the data in the Control. + * @range: Buffer describing valid range of values for the Control. * @mode: Access mode of the Control. * @layers: Bitmask of access layers of the Control. * @deferrable: Indicates if the access to the Control can be deferred. @@ -760,13 +760,13 @@ struct sdca_control { const char *label; int sel; - int value; int nbits; - int interrupt_position; + int *values; u64 cn_list; + int interrupt_position; - struct sdca_control_range range; enum sdca_control_datatype type; + struct sdca_control_range range; enum sdca_access_mode mode; u8 layers; diff --git a/include/sound/tas2770-tlv.h b/include/sound/tas2770-tlv.h index c0bd495b4a07..c7380925417a 100644 --- a/include/sound/tas2770-tlv.h +++ b/include/sound/tas2770-tlv.h @@ -14,10 +14,10 @@ #ifndef __TAS2770_TLV_H__ #define __TAS2770_TLV_H__ -#define TAS2770_DVC_LEVEL TASDEVICE_REG(0x0, 0x0, 0x17) +#define TAS2770_DVC_LEVEL TASDEVICE_REG(0x0, 0x0, 0x05) #define TAS2770_AMP_LEVEL TASDEVICE_REG(0x0, 0x0, 0x03) -static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2770_dvc_tlv, 1650, 50, 0); +static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2770_dvc_tlv, -10000, 50, 0); static const __maybe_unused DECLARE_TLV_DB_SCALE(tas2770_amp_tlv, 1100, 50, 0); #endif diff --git a/include/sound/tas2781-tlv.h b/include/sound/tas2781-tlv.h index d87263e43fdb..ef9b9f19d212 100644 --- a/include/sound/tas2781-tlv.h +++ b/include/sound/tas2781-tlv.h @@ -15,7 +15,7 @@ #ifndef __TAS2781_TLV_H__ #define __TAS2781_TLV_H__ -static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 100, 0); +static const __maybe_unused DECLARE_TLV_DB_SCALE(dvc_tlv, -10000, 50, 0); static const __maybe_unused DECLARE_TLV_DB_SCALE(amp_vol_tlv, 1100, 50, 0); #endif diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h index d54fe354b390..7e418f065b94 100644 --- a/include/trace/events/btrfs.h +++ b/include/trace/events/btrfs.h @@ -687,7 +687,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __field( loff_t, range_start ) __field( loff_t, range_end ) __field( char, for_kupdate ) - __field( char, for_reclaim ) __field( char, range_cyclic ) __field( unsigned long, writeback_index ) __field( u64, root_objectid ) @@ -701,7 +700,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage, __entry->range_start = wbc->range_start; __entry->range_end = wbc->range_end; __entry->for_kupdate = wbc->for_kupdate; - __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->writeback_index = inode->i_mapping->writeback_index; __entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root); @@ -710,13 +708,12 @@ DECLARE_EVENT_CLASS(btrfs__writepage, TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu " "nr_to_write=%ld pages_skipped=%ld range_start=%llu " "range_end=%llu for_kupdate=%d " - "for_reclaim=%d range_cyclic=%d writeback_index=%lu", + "range_cyclic=%d writeback_index=%lu", show_root_type(__entry->root_objectid), __entry->ino, __entry->index, __entry->nr_to_write, __entry->pages_skipped, __entry->range_start, __entry->range_end, - __entry->for_kupdate, - __entry->for_reclaim, 
__entry->range_cyclic, + __entry->for_kupdate, __entry->range_cyclic, __entry->writeback_index) ); diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h index 7d332387be6c..ba9229af9a34 100644 --- a/include/trace/events/cgroup.h +++ b/include/trace/events/cgroup.h @@ -257,53 +257,6 @@ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock, TP_ARGS(cgrp, cpu, contended) ); -/* - * Related to per CPU locks: - * global rstat_base_cpu_lock for base stats - * cgroup_subsys::rstat_ss_cpu_lock for subsystem stats - */ -DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended, - - TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), - - TP_ARGS(cgrp, cpu, contended) -); - -DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended_fastpath, - - TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), - - TP_ARGS(cgrp, cpu, contended) -); - -DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked, - - TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), - - TP_ARGS(cgrp, cpu, contended) -); - -DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked_fastpath, - - TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), - - TP_ARGS(cgrp, cpu, contended) -); - -DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock, - - TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), - - TP_ARGS(cgrp, cpu, contended) -); - -DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock_fastpath, - - TP_PROTO(struct cgroup *cgrp, int cpu, bool contended), - - TP_ARGS(cgrp, cpu, contended) -); - #endif /* _TRACE_CGROUP_H */ /* This part must be outside protection */ diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h index da4bd9fd1162..852d725afea2 100644 --- a/include/trace/events/damon.h +++ b/include/trace/events/damon.h @@ -9,6 +9,30 @@ #include <linux/types.h> #include <linux/tracepoint.h> +TRACE_EVENT(damos_esz, + + TP_PROTO(unsigned int context_idx, unsigned int scheme_idx, + unsigned long esz), + + TP_ARGS(context_idx, scheme_idx, esz), + + TP_STRUCT__entry( + __field(unsigned int, context_idx) + __field(unsigned int, scheme_idx) + __field(unsigned long, esz) + ), + + TP_fast_assign( + __entry->context_idx = context_idx; + __entry->scheme_idx = scheme_idx; + __entry->esz = esz; + ), + + TP_printk("ctx_idx=%u scheme_idx=%u esz=%lu", + __entry->context_idx, __entry->scheme_idx, + __entry->esz) +); + TRACE_EVENT_CONDITION(damos_before_apply, TP_PROTO(unsigned int context_idx, unsigned int scheme_idx, @@ -48,6 +72,23 @@ TRACE_EVENT_CONDITION(damos_before_apply, __entry->nr_accesses, __entry->age) ); +TRACE_EVENT(damon_monitor_intervals_tune, + + TP_PROTO(unsigned long sample_us), + + TP_ARGS(sample_us), + + TP_STRUCT__entry( + __field(unsigned long, sample_us) + ), + + TP_fast_assign( + __entry->sample_us = sample_us; + ), + + TP_printk("sample_us=%lu", __entry->sample_us) +); + TRACE_EVENT(damon_aggregated, TP_PROTO(unsigned int target_id, struct damon_region *r, diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h index 6f9cf2811733..a374e7ea7e57 100644 --- a/include/trace/events/ext4.h +++ b/include/trace/events/ext4.h @@ -23,10 +23,7 @@ struct partial_cluster; #define show_mballoc_flags(flags) __print_flags(flags, "|", \ { EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \ - { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \ - { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \ { EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \ - { EXT4_MB_HINT_BEST, "HINT_BEST" }, \ { EXT4_MB_HINT_DATA, "HINT_DATA" }, \ { EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \ { EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \ @@ -483,16 
+480,17 @@ TRACE_EVENT(ext4_writepages, (unsigned long) __entry->writeback_index) ); -TRACE_EVENT(ext4_da_write_pages, - TP_PROTO(struct inode *inode, pgoff_t first_page, +TRACE_EVENT(ext4_da_write_folios_start, + TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos, struct writeback_control *wbc), - TP_ARGS(inode, first_page, wbc), + TP_ARGS(inode, start_pos, next_pos, wbc), TP_STRUCT__entry( __field( dev_t, dev ) __field( ino_t, ino ) - __field( pgoff_t, first_page ) + __field( loff_t, start_pos ) + __field( loff_t, next_pos ) __field( long, nr_to_write ) __field( int, sync_mode ) ), @@ -500,18 +498,48 @@ TRACE_EVENT(ext4_da_write_pages, TP_fast_assign( __entry->dev = inode->i_sb->s_dev; __entry->ino = inode->i_ino; - __entry->first_page = first_page; + __entry->start_pos = start_pos; + __entry->next_pos = next_pos; __entry->nr_to_write = wbc->nr_to_write; __entry->sync_mode = wbc->sync_mode; ), - TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld " - "sync_mode %d", + TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d", MAJOR(__entry->dev), MINOR(__entry->dev), - (unsigned long) __entry->ino, __entry->first_page, + (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos, __entry->nr_to_write, __entry->sync_mode) ); +TRACE_EVENT(ext4_da_write_folios_end, + TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos, + struct writeback_control *wbc, int ret), + + TP_ARGS(inode, start_pos, next_pos, wbc, ret), + + TP_STRUCT__entry( + __field( dev_t, dev ) + __field( ino_t, ino ) + __field( loff_t, start_pos ) + __field( loff_t, next_pos ) + __field( long, nr_to_write ) + __field( int, ret ) + ), + + TP_fast_assign( + __entry->dev = inode->i_sb->s_dev; + __entry->ino = inode->i_ino; + __entry->start_pos = start_pos; + __entry->next_pos = next_pos; + __entry->nr_to_write = wbc->nr_to_write; + __entry->ret = ret; + ), + + TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d", + MAJOR(__entry->dev), MINOR(__entry->dev), + (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos, + __entry->nr_to_write, __entry->ret) +); + TRACE_EVENT(ext4_da_write_pages_extent, TP_PROTO(struct inode *inode, struct ext4_map_blocks *map), diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h index 76b56f78abb0..50ebc1290ab0 100644 --- a/include/trace/events/fs_dax.h +++ b/include/trace/events/fs_dax.h @@ -15,7 +15,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class, __field(unsigned long, ino) __field(unsigned long, vm_start) __field(unsigned long, vm_end) - __field(unsigned long, vm_flags) + __field(vm_flags_t, vm_flags) __field(unsigned long, address) __field(pgoff_t, pgoff) __field(pgoff_t, max_pgoff) @@ -67,7 +67,7 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class, TP_ARGS(inode, vmf, zero_folio, radix_entry), TP_STRUCT__entry( __field(unsigned long, ino) - __field(unsigned long, vm_flags) + __field(vm_flags_t, vm_flags) __field(unsigned long, address) __field(struct folio *, zero_folio) __field(void *, radix_entry) @@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class, TP_ARGS(inode, vmf, result), TP_STRUCT__entry( __field(unsigned long, ino) - __field(unsigned long, vm_flags) + __field(vm_flags_t, vm_flags) __field(unsigned long, address) __field(pgoff_t, pgoff) __field(dev_t, dev) diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h index f74925a6cf69..474358773abe 100644 --- a/include/trace/events/kmem.h +++ b/include/trace/events/kmem.h @@ -304,44 
+304,6 @@ TRACE_EVENT(mm_page_alloc_extfrag, __entry->change_ownership) ); -TRACE_EVENT(mm_alloc_contig_migrate_range_info, - - TP_PROTO(unsigned long start, - unsigned long end, - unsigned long nr_migrated, - unsigned long nr_reclaimed, - unsigned long nr_mapped, - int migratetype), - - TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype), - - TP_STRUCT__entry( - __field(unsigned long, start) - __field(unsigned long, end) - __field(unsigned long, nr_migrated) - __field(unsigned long, nr_reclaimed) - __field(unsigned long, nr_mapped) - __field(int, migratetype) - ), - - TP_fast_assign( - __entry->start = start; - __entry->end = end; - __entry->nr_migrated = nr_migrated; - __entry->nr_reclaimed = nr_reclaimed; - __entry->nr_mapped = nr_mapped; - __entry->migratetype = migratetype; - ), - - TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu", - __entry->start, - __entry->end, - __entry->migratetype, - __entry->nr_migrated, - __entry->nr_reclaimed, - __entry->nr_mapped) -); - TRACE_EVENT(mm_setup_per_zone_wmarks, TP_PROTO(struct zone *zone), diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h index f8d61485de16..ee2843a5daef 100644 --- a/include/trace/events/mmap.h +++ b/include/trace/events/mmap.h @@ -43,58 +43,6 @@ TRACE_EVENT(vm_unmapped_area, __entry->align_offset) ); -TRACE_EVENT(vma_mas_szero, - TP_PROTO(struct maple_tree *mt, unsigned long start, - unsigned long end), - - TP_ARGS(mt, start, end), - - TP_STRUCT__entry( - __field(struct maple_tree *, mt) - __field(unsigned long, start) - __field(unsigned long, end) - ), - - TP_fast_assign( - __entry->mt = mt; - __entry->start = start; - __entry->end = end; - ), - - TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,", - __entry->mt, - (unsigned long) __entry->start, - (unsigned long) __entry->end - ) -); - -TRACE_EVENT(vma_store, - TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma), - - TP_ARGS(mt, vma), - - TP_STRUCT__entry( - __field(struct maple_tree *, mt) - __field(struct vm_area_struct *, vma) - __field(unsigned long, vm_start) - __field(unsigned long, vm_end) - ), - - TP_fast_assign( - __entry->mt = mt; - __entry->vma = vma; - __entry->vm_start = vma->vm_start; - __entry->vm_end = vma->vm_end - 1; - ), - - TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,", - __entry->mt, __entry->vma, - (unsigned long) __entry->vm_start, - (unsigned long) __entry->vm_end - ) -); - - TRACE_EVENT(exit_mmap, TP_PROTO(struct mm_struct *mm), diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9f16ee2603ba..7b2645b50e78 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -829,8 +829,6 @@ TRACE_EVENT(sched_wake_idle_without_ipi, /* * Following tracepoints are not exported in tracefs and provide hooking * mechanisms only for testing and debugging purposes. - * - * Postfixed with _tp to make them easily identifiable in the code. 
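/*
 * Example: because these hooks use DECLARE_TRACE() rather than
 * TRACE_EVENT(), they have no tracefs entry; test and debug code
 * attaches through the generated register helpers. Sketch for the
 * pelt_cfs hook below (the probe signature mirrors its TP_PROTO):
 */
static void example_pelt_probe(void *data, struct cfs_rq *cfs_rq) { }

static int example_attach(void)
{
	return register_trace_pelt_cfs(example_pelt_probe, NULL);
}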
*/ DECLARE_TRACE(pelt_cfs, TP_PROTO(struct cfs_rq *cfs_rq), diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h index bf6cc98d9122..c36c72ab7f2b 100644 --- a/include/trace/events/scsi.h +++ b/include/trace/events/scsi.h @@ -200,6 +200,14 @@ TRACE_EVENT(scsi_dispatch_cmd_start, __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len)) ); +#define scsi_rtn_name(result) { result, #result } +#define show_rtn_name(val) \ + __print_symbolic(val, \ + scsi_rtn_name(SCSI_MLQUEUE_HOST_BUSY), \ + scsi_rtn_name(SCSI_MLQUEUE_DEVICE_BUSY), \ + scsi_rtn_name(SCSI_MLQUEUE_EH_RETRY), \ + scsi_rtn_name(SCSI_MLQUEUE_TARGET_BUSY)) + TRACE_EVENT(scsi_dispatch_cmd_error, TP_PROTO(struct scsi_cmnd *cmd, int rtn), @@ -240,14 +248,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error, TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \ " prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \ - " rtn=%d", + " rtn=%s", __entry->host_no, __entry->channel, __entry->id, __entry->lun, __entry->data_sglen, __entry->prot_sglen, show_prot_op_name(__entry->prot_op), __entry->driver_tag, __entry->scheduler_tag, show_opcode_name(__entry->opcode), __parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len), __print_hex(__get_dynamic_array(cmnd), __entry->cmd_len), - __entry->rtn) + show_rtn_name(__entry->rtn) + ) ); DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template, diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h index 0ff388131fc9..1e23919c0da9 100644 --- a/include/trace/events/writeback.h +++ b/include/trace/events/writeback.h @@ -459,7 +459,6 @@ DECLARE_EVENT_CLASS(wbc_class, __field(int, sync_mode) __field(int, for_kupdate) __field(int, for_background) - __field(int, for_reclaim) __field(int, range_cyclic) __field(long, range_start) __field(long, range_end) @@ -473,23 +472,20 @@ DECLARE_EVENT_CLASS(wbc_class, __entry->sync_mode = wbc->sync_mode; __entry->for_kupdate = wbc->for_kupdate; __entry->for_background = wbc->for_background; - __entry->for_reclaim = wbc->for_reclaim; __entry->range_cyclic = wbc->range_cyclic; __entry->range_start = (long)wbc->range_start; __entry->range_end = (long)wbc->range_end; __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); ), - TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " - "bgrd=%d reclm=%d cyclic=%d " - "start=0x%lx end=0x%lx cgroup_ino=%lu", + TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d " + "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu", __entry->name, __entry->nr_to_write, __entry->pages_skipped, __entry->sync_mode, __entry->for_kupdate, __entry->for_background, - __entry->for_reclaim, __entry->range_cyclic, __entry->range_start, __entry->range_end, diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h index 2e21b5594f81..ea5a0899ecf0 100644 --- a/include/uapi/linux/capability.h +++ b/include/uapi/linux/capability.h @@ -6,9 +6,10 @@ * Alexander Kjeldaas <astor@guardian.no> * with help from Aleph1, Roland Buresund and Andrew Main. 
* - * See here for the libcap library ("POSIX draft" compliance): + * See here for the libcap2 library (compliant with Section 25 of + * the withdrawn POSIX 1003.1e Draft 17): * - * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/ + * https://www.kernel.org/pub/linux/libs/security/linux-privs/libcap2/ */ #ifndef _UAPI_LINUX_CAPABILITY_H diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h index 3b2524e4b667..ca5851e97fac 100644 --- a/include/uapi/linux/input-event-codes.h +++ b/include/uapi/linux/input-event-codes.h @@ -601,6 +601,11 @@ #define BTN_DPAD_LEFT 0x222 #define BTN_DPAD_RIGHT 0x223 +#define BTN_GRIPL 0x224 +#define BTN_GRIPR 0x225 +#define BTN_GRIPL2 0x226 +#define BTN_GRIPR2 0x227 + #define KEY_ALS_TOGGLE 0x230 /* Ambient light sensor */ #define KEY_ROTATE_LOCK_TOGGLE 0x231 /* Display rotation lock */ #define KEY_REFRESH_RATE_TOGGLE 0x232 /* Display refresh rate toggle */ @@ -765,6 +770,9 @@ #define KEY_KBD_LCD_MENU4 0x2bb #define KEY_KBD_LCD_MENU5 0x2bc +/* Performance Boost key (Alienware)/G-Mode key (Dell) */ +#define KEY_PERFORMANCE 0x2bd + #define BTN_TRIGGER_HAPPY 0x2c0 #define BTN_TRIGGER_HAPPY1 0x2c0 #define BTN_TRIGGER_HAPPY2 0x2c1 diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h index b8a0e70ee2fd..6957dc539d83 100644 --- a/include/uapi/linux/io_uring.h +++ b/include/uapi/linux/io_uring.h @@ -392,12 +392,16 @@ enum io_uring_op { * the starting buffer ID in cqe->flags as per * usual for provided buffer usage. The buffers * will be contiguous from the starting buffer ID. + * + * IORING_SEND_VECTORIZED If set, SEND[_ZC] will take a pointer to an io_vec + * to allow vectorized send operations. */ #define IORING_RECVSEND_POLL_FIRST (1U << 0) #define IORING_RECV_MULTISHOT (1U << 1) #define IORING_RECVSEND_FIXED_BUF (1U << 2) #define IORING_SEND_ZC_REPORT_USAGE (1U << 3) #define IORING_RECVSEND_BUNDLE (1U << 4) +#define IORING_SEND_VECTORIZED (1U << 5) /* * cqe.res for IORING_CQE_F_NOTIF if diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h index f29b6c44655e..c218c89e0e2e 100644 --- a/include/uapi/linux/iommufd.h +++ b/include/uapi/linux/iommufd.h @@ -56,6 +56,7 @@ enum { IOMMUFD_CMD_VDEVICE_ALLOC = 0x91, IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92, IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93, + IOMMUFD_CMD_HW_QUEUE_ALLOC = 0x94, }; /** @@ -591,16 +592,43 @@ struct iommu_hw_info_arm_smmuv3 { }; /** + * struct iommu_hw_info_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Hardware + * Information (IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV) + * + * @flags: Must be 0 + * @version: Version number for the CMDQ-V HW for PARAM bits[03:00] + * @log2vcmdqs: Log2 of the total number of VCMDQs for PARAM bits[07:04] + * @log2vsids: Log2 of the total number of SID replacements for PARAM bits[15:12] + * @__reserved: Must be 0 + * + * The VMM can use these fields directly in its emulated global PARAM register. Note + * that only one Virtual Interface (VINTF) should be exposed to a VM, i.e. PARAM + * bits[11:08] should be set to 0 for log2 of the total number of VINTFs. 
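/*
 * Example: VMM-side sketch of composing the emulated PARAM register
 * from the fields above, following the quoted bit positions;
 * illustrative only.
 */
static __u32 example_emulate_param(const struct iommu_hw_info_tegra241_cmdqv *info)
{
	return ((__u32)info->version & 0xf) |		/* bits[03:00] */
	       (((__u32)info->log2vcmdqs & 0xf) << 4) |	/* bits[07:04] */
	       /* bits[11:08] stay 0: a single VINTF is exposed */
	       (((__u32)info->log2vsids & 0xf) << 12);	/* bits[15:12] */
}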
+ */ +struct iommu_hw_info_tegra241_cmdqv { + __u32 flags; + __u8 version; + __u8 log2vcmdqs; + __u8 log2vsids; + __u8 __reserved; +}; + +/** * enum iommu_hw_info_type - IOMMU Hardware Info Types - * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware + * @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware * info + * @IOMMU_HW_INFO_TYPE_DEFAULT: Input to request the default type * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type * @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type + * @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) info type */ enum iommu_hw_info_type { IOMMU_HW_INFO_TYPE_NONE = 0, + IOMMU_HW_INFO_TYPE_DEFAULT = 0, IOMMU_HW_INFO_TYPE_INTEL_VTD = 1, IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2, + IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3, }; /** @@ -626,6 +654,15 @@ enum iommufd_hw_capabilities { }; /** + * enum iommufd_hw_info_flags - Flags for iommu_hw_info + * @IOMMU_HW_INFO_FLAG_INPUT_TYPE: If set, @in_data_type carries an input type + * for user space to request a specific info type + */ +enum iommufd_hw_info_flags { + IOMMU_HW_INFO_FLAG_INPUT_TYPE = 1 << 0, +}; + +/** * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO) * @size: sizeof(struct iommu_hw_info) * @flags: Must be 0 @@ -634,6 +671,12 @@ enum iommufd_hw_capabilities { * data that kernel supports * @data_uptr: User pointer to a user-space buffer used by the kernel to fill * the iommu type specific hardware information data + * @in_data_type: This shares the same field with @out_data_type, making it + * a bidirectional field. When IOMMU_HW_INFO_FLAG_INPUT_TYPE is + * set, the input type carried via this @in_data_type field will + * be valid, requesting the info data for the given type. If + * IOMMU_HW_INFO_FLAG_INPUT_TYPE is unset, any input value will + * be seen as IOMMU_HW_INFO_TYPE_DEFAULT * @out_data_type: Output the iommu hardware info type as defined in the enum * iommu_hw_info_type. * @out_capabilities: Output the generic iommu capability info type as defined @@ -663,7 +706,10 @@ struct iommu_hw_info { __u32 dev_id; __u32 data_len; __aligned_u64 data_uptr; - __u32 out_data_type; + union { + __u32 in_data_type; + __u32 out_data_type; + }; __u8 out_max_pasid_log2; __u8 __reserved[3]; __aligned_u64 out_capabilities; @@ -951,10 +997,29 @@ struct iommu_fault_alloc { * enum iommu_viommu_type - Virtual IOMMU Type * @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use * @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type + * @IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) enabled ARM SMMUv3 type */ enum iommu_viommu_type { IOMMU_VIOMMU_TYPE_DEFAULT = 0, IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1, + IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV = 2, +}; + +/** + * struct iommu_viommu_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Virtual Interface + * (IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV) + * @out_vintf_mmap_offset: mmap offset argument for VINTF's page0 + * @out_vintf_mmap_length: mmap length argument for VINTF's page0 + * + * Both @out_vintf_mmap_offset and @out_vintf_mmap_length are reported by the kernel + * for user space to mmap the VINTF page0 from the host physical address space + * to the guest physical address space so that a guest kernel can directly R/W + * access the VINTF page0 in order to control its virtual command queues. 
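/*
 * Example: userspace sketch of the bidirectional type field described
 * above. Setting IOMMU_HW_INFO_FLAG_INPUT_TYPE requests one specific
 * info format instead of the default; assumes the IOMMU_GET_HW_INFO
 * ioctl from this header, with error handling omitted.
 */
#include <stdint.h>
#include <sys/ioctl.h>

static int example_query_cmdqv(int iommufd, __u32 dev_id)
{
	struct iommu_hw_info_tegra241_cmdqv info = {};
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_HW_INFO_FLAG_INPUT_TYPE,
		.dev_id = dev_id,
		.data_len = sizeof(info),
		.data_uptr = (__u64)(uintptr_t)&info,
		.in_data_type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV,
	};

	return ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd);
}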
+ */ +struct iommu_viommu_tegra241_cmdqv { + __aligned_u64 out_vintf_mmap_offset; + __aligned_u64 out_vintf_mmap_length; }; /** @@ -965,6 +1030,9 @@ enum iommu_viommu_type { * @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU * @hwpt_id: ID of a nesting parent HWPT to associate to * @out_viommu_id: Output virtual IOMMU ID for the allocated object + * @data_len: Length of the type specific data + * @__reserved: Must be 0 + * @data_uptr: User pointer to a driver-specific virtual IOMMU data * * Allocate a virtual IOMMU object, representing the underlying physical IOMMU's * virtualization support that is a security-isolated slice of the real IOMMU HW @@ -985,6 +1053,9 @@ struct iommu_viommu_alloc { __u32 dev_id; __u32 hwpt_id; __u32 out_viommu_id; + __u32 data_len; + __u32 __reserved; + __aligned_u64 data_uptr; }; #define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC) @@ -995,10 +1066,15 @@ struct iommu_viommu_alloc { * @dev_id: The physical device to allocate a virtual instance on the vIOMMU * @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTROY * @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID - * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table + * of AMD IOMMU, and vRID of Intel VT-d * * Allocate a virtual device instance (for a physical device) against a vIOMMU. * This instance holds the device's information (related to its vIOMMU) in a VM. + * Userspace should use IOMMU_DESTROY to destroy the virtual device before + * destroying the physical device (by closing the vfio_cdev fd). Otherwise the + * virtual device is forcibly destroyed on physical device destruction, and + * its vdevice_id is permanently leaked (unremovable and unreusable) until the + * iommufd is closed. */ struct iommu_vdevice_alloc { __u32 size; @@ -1075,10 +1151,12 @@ struct iommufd_vevent_header { * enum iommu_veventq_type - Virtual Event Queue Type * @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use * @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue + * @IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV Extension IRQ */ enum iommu_veventq_type { IOMMU_VEVENTQ_TYPE_DEFAULT = 0, IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1, + IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV = 2, }; /** @@ -1103,6 +1181,19 @@ struct iommu_vevent_arm_smmuv3 { }; /** + * struct iommu_vevent_tegra241_cmdqv - Tegra241 CMDQV IRQ + * (IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV) + * @lvcmdq_err_map: 128-bit logical vcmdq error map, little-endian. + * (Refer to the per-VINTF LVCMDQ_ERR_MAP registers) + * + * The 128-bit register value from HW exclusively reflects the error bits for a + * Virtual Interface represented by a vIOMMU object. Read and report directly.
+ */ +struct iommu_vevent_tegra241_cmdqv { + __aligned_le64 lvcmdq_err_map[2]; +}; + +/** * struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC) * @size: sizeof(struct iommu_veventq_alloc) * @flags: Must be 0 @@ -1141,4 +1232,61 @@ struct iommu_veventq_alloc { __u32 __reserved; }; #define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC) + +/** + * enum iommu_hw_queue_type - HW Queue Type + * @IOMMU_HW_QUEUE_TYPE_DEFAULT: Reserved for future use + * @IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM + * SMMUv3) Virtual Command Queue (VCMDQ) + */ +enum iommu_hw_queue_type { + IOMMU_HW_QUEUE_TYPE_DEFAULT = 0, + /* + * TEGRA241_CMDQV requirements (otherwise, allocation will fail) + * - alloc starts from the lowest @index=0 in ascending order + * - destroy starts from the last allocated @index in descending order + * - @nesting_parent_iova must be aligned to @length in bytes and mapped in IOAS + * - @length must be a power of 2, with a minimum 32 bytes and a maximum + * 2 ^ idr[1].CMDQS * 16 bytes (use GET_HW_INFO call to read idr[1] + * from struct iommu_hw_info_arm_smmuv3) + * - it is suggested to back the queue memory with contiguous physical pages or + * a single huge page with alignment of the queue size, and limit the + * emulated vSMMU's IDR1.CMDQS to log2(huge page size / 16 bytes) + */ + IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV = 1, +}; + +/** + * struct iommu_hw_queue_alloc - ioctl(IOMMU_HW_QUEUE_ALLOC) + * @size: sizeof(struct iommu_hw_queue_alloc) + * @flags: Must be 0 + * @viommu_id: Virtual IOMMU ID to associate the HW queue with + * @type: One of enum iommu_hw_queue_type + * @index: The logical index to the HW queue per virtual IOMMU for a multi-queue + * model + * @out_hw_queue_id: The ID of the new HW queue + * @nesting_parent_iova: Base address of the queue memory in the guest physical + * address space + * @length: Length of the queue memory + * + * Allocate a HW queue object for a vIOMMU-specific HW-accelerated queue, which + * allows HW to access a guest queue memory described using @nesting_parent_iova + * and @length. + * + * A vIOMMU can allocate multiple queues, but it must use a different @index per + * type to separate each allocation, e.g.:: + * + * Type1 HW queue0, Type1 HW queue1, Type2 HW queue0, ... + */ +struct iommu_hw_queue_alloc { + __u32 size; + __u32 flags; + __u32 viommu_id; + __u32 type; + __u32 index; + __u32 out_hw_queue_id; + __aligned_u64 nesting_parent_iova; + __aligned_u64 length; +}; +#define IOMMU_HW_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HW_QUEUE_ALLOC) #endif diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 5ae1741ea8ea..8958ebfcff94 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -27,6 +27,7 @@ #define KEXEC_FILE_ON_CRASH 0x00000002 #define KEXEC_FILE_NO_INITRAMFS 0x00000004 #define KEXEC_FILE_DEBUG 0x00000008 +#define KEXEC_FILE_NO_CMA 0x00000010 /* These values match the ELF architecture values. * Unless there is a good reason that should continue to be the case. */
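To make the VCMDQ allocation contract in the iommufd hunks above concrete, here is a minimal user-space sketch of an IOMMU_HW_QUEUE_ALLOC call. It is illustrative only: the iommufd fd, viommu_id and queue geometry are assumed inputs from the caller, and the checks simply restate the TEGRA241_CMDQV requirements listed in enum iommu_hw_queue_type (length a power of two of at least 32 bytes, base address aligned to the length, and memory already mapped in the IOAS).

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>	/* struct iommu_hw_queue_alloc, IOMMU_HW_QUEUE_ALLOC */

/* Sketch: allocate one TEGRA241 VCMDQ behind an existing vIOMMU. */
static int tegra241_vcmdq_alloc(int iommufd, uint32_t viommu_id, uint32_t index,
				uint64_t queue_iova, uint64_t queue_len)
{
	struct iommu_hw_queue_alloc cmd;

	/* @length: power of 2, >= 32 bytes; @nesting_parent_iova: aligned to it */
	if (queue_len < 32 || (queue_len & (queue_len - 1)) ||
	    (queue_iova & (queue_len - 1)))
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.size = sizeof(cmd);
	cmd.viommu_id = viommu_id;
	cmd.type = IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV;
	cmd.index = index;			/* allocate in ascending order from 0 */
	cmd.nesting_parent_iova = queue_iova;	/* must already be mapped in the IOAS */
	cmd.length = queue_len;

	if (ioctl(iommufd, IOMMU_HW_QUEUE_ALLOC, &cmd) < 0)
		return -1;

	return (int)cmd.out_hw_queue_id;	/* destroy later via IOMMU_DESTROY */
}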
diff --git a/include/uapi/linux/media/raspberrypi/pisp_be_config.h b/include/uapi/linux/media/raspberrypi/pisp_be_config.h index cbeb714f4d61..2ad3b90684d7 100644 --- a/include/uapi/linux/media/raspberrypi/pisp_be_config.h +++ b/include/uapi/linux/media/raspberrypi/pisp_be_config.h @@ -21,10 +21,11 @@ /* preferred byte alignment for outputs */ #define PISP_BACK_END_OUTPUT_MAX_ALIGN 64u -/* minimum allowed tile width anywhere in the pipeline */ -#define PISP_BACK_END_MIN_TILE_WIDTH 16u -/* minimum allowed tile width anywhere in the pipeline */ -#define PISP_BACK_END_MIN_TILE_HEIGHT 16u +/* minimum allowed tile sizes anywhere in the pipeline */ +#define PISP_BACK_END_MIN_TILE_WIDTH 16u +#define PISP_BACK_END_MIN_TILE_HEIGHT 16u +#define PISP_BACK_END_MAX_TILE_WIDTH 65536u +#define PISP_BACK_END_MAX_TILE_HEIGHT 65536u #define PISP_BACK_END_NUM_OUTPUTS 2 #define PISP_BACK_END_HOG_OUTPUT 1 diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h index a3a3e942dedf..f5b17745de60 100644 --- a/include/uapi/linux/pci_regs.h +++ b/include/uapi/linux/pci_regs.h @@ -745,6 +745,7 @@ #define PCI_EXT_CAP_ID_L1SS 0x1E /* L1 PM Substates */ #define PCI_EXT_CAP_ID_PTM 0x1F /* Precision Time Measurement */ #define PCI_EXT_CAP_ID_DVSEC 0x23 /* Designated Vendor-Specific */ +#define PCI_EXT_CAP_ID_VF_REBAR 0x24 /* VF Resizable BAR */ #define PCI_EXT_CAP_ID_DLF 0x25 /* Data Link Feature */ #define PCI_EXT_CAP_ID_PL_16GT 0x26 /* Physical Layer 16.0 GT/s */ #define PCI_EXT_CAP_ID_NPEM 0x29 /* Native PCIe Enclosure Management */ @@ -1141,6 +1142,14 @@ #define PCI_DVSEC_HEADER2 0x8 /* Designated Vendor-Specific Header2 */ #define PCI_DVSEC_HEADER2_ID(x) ((x) & 0xffff) +/* VF Resizable BARs, same layout as PCI_REBAR */ +#define PCI_VF_REBAR_CAP PCI_REBAR_CAP +#define PCI_VF_REBAR_CAP_SIZES PCI_REBAR_CAP_SIZES +#define PCI_VF_REBAR_CTRL PCI_REBAR_CTRL +#define PCI_VF_REBAR_CTRL_BAR_IDX PCI_REBAR_CTRL_BAR_IDX +#define PCI_VF_REBAR_CTRL_NBAR_MASK PCI_REBAR_CTRL_NBAR_MASK +#define PCI_VF_REBAR_CTRL_BAR_SIZE PCI_REBAR_CTRL_BAR_SIZE + /* Data Link Feature */ #define PCI_DLF_CAP 0x04 /* Capabilities Register */ #define PCI_DLF_EXCHANGE_ENABLE 0x80000000 /* Data Link Feature Exchange Enable */ diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h index d3aa8715a525..d6023a45a9d0 100644 --- a/include/uapi/linux/pcitest.h +++ b/include/uapi/linux/pcitest.h @@ -21,6 +21,7 @@ #define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int) #define PCITEST_GET_IRQTYPE _IO('P', 0x9) #define PCITEST_BARS _IO('P', 0xa) +#define PCITEST_DOORBELL _IO('P', 0xb) #define PCITEST_CLEAR_IRQ _IO('P', 0x10) #define PCITEST_IRQ_TYPE_UNDEFINED -1 diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h index ff47b6f0ba0f..b13946287277 100644 --- a/include/uapi/linux/raid/md_p.h +++ b/include/uapi/linux/raid/md_p.h @@ -173,7 +173,7 @@ typedef struct mdp_superblock_s { #else #error unspecified endianness #endif - __u32 recovery_cp; /* 11 recovery checkpoint sector count */ + __u32 resync_offset; /* 11 resync checkpoint sector count */ /* These are only valid for minor_version > 90 */ __u64 reshape_position; /* 12,13 next address in array-space for reshape */ __u32 new_level; /* 14 new level we are reshaping to */ diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h index 2d995f3c1ca3..3b060ea6eed7 100644 --- a/include/uapi/linux/rkisp1-config.h +++ b/include/uapi/linux/rkisp1-config.h @@ -170,6 +170,13 @@ #define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64 /* + * Wide Dynamic
Range + */ +#define RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV 32 +#define RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF (RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV + 1) +#define RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS 4 + +/* * Measurement types */ #define RKISP1_CIF_ISP_STAT_AWB (1U << 0) @@ -889,6 +896,72 @@ struct rkisp1_cif_isp_compand_curve_config { __u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS]; }; +/** + * struct rkisp1_cif_isp_wdr_tone_curve - Tone mapping curve definition for WDR. + * + * @dY: the dYn increments for the horizontal (input) axis of the tone curve. + * Each 3-bit dY value represents an increment of 2**(value+3). + * dY[0] bits 0:2 is increment dY1, bit 3 unused + * dY[0] bits 4:6 is increment dY2, bit 7 unused + * ... + * dY[0] bits 28:30 is increment dY8, bit 31 unused + * ... and so on until dY[3] bits 28:30 is increment dY32, bit 31 unused. + * @ym: the Ym values for the vertical (output) axis of the tone curve. + * Each value is 13 bits wide. + */ +struct rkisp1_cif_isp_wdr_tone_curve { + __u32 dY[RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS]; + __u16 ym[RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF]; +}; + +/** + * struct rkisp1_cif_isp_wdr_iref_config - Illumination reference config for WDR. + * + * Use illumination reference value as described below, instead of only the + * luminance (Y) value for tone mapping and gain calculations: + * IRef = (rgb_factor * RGBMax_tr + (8 - rgb_factor) * Y)/8 + * + * @rgb_factor: defines how much influence the RGBmax approach has in + * comparison to Y (valid values are 0..8). + * @use_y9_8: use Y*9/8 for maximum value calculation along with the + * default of R, G, B for noise reduction. + * @use_rgb7_8: decrease RGBMax by 7/8 for noise reduction. + * @disable_transient: disable transient calculation between Y and RGBY_max. + */ +struct rkisp1_cif_isp_wdr_iref_config { + __u8 rgb_factor; + __u8 use_y9_8; + __u8 use_rgb7_8; + __u8 disable_transient; +}; + +/** + * struct rkisp1_cif_isp_wdr_config - Configuration for wide dynamic range. + * + * @tone_curve: tone mapping curve. + * @iref_config: illumination reference configuration. (when use_iref is true) + * @rgb_offset: RGB offset value for RGB operation mode. (12 bits) + * @luma_offset: luminance offset value for RGB operation mode. (12 bits) + * @dmin_thresh: lower threshold for deltaMin value. (12 bits) + * @dmin_strength: strength factor for deltaMin. (valid range is 0x00..0x10) + * @use_rgb_colorspace: use RGB instead of luminance/chrominance colorspace. + * @bypass_chroma_mapping: disable chrominance mapping (only valid if + * use_rgb_colorspace = 0) + * @use_iref: use illumination reference instead of Y for tone mapping + * and gain calculations.
+ */ +struct rkisp1_cif_isp_wdr_config { + struct rkisp1_cif_isp_wdr_tone_curve tone_curve; + struct rkisp1_cif_isp_wdr_iref_config iref_config; + __u16 rgb_offset; + __u16 luma_offset; + __u16 dmin_thresh; + __u8 dmin_strength; + __u8 use_rgb_colorspace; + __u8 bypass_chroma_mapping; + __u8 use_iref; +}; + /*---------- PART2: Measurement Statistics ------------*/ /** @@ -1059,6 +1132,7 @@ struct rkisp1_stat_buffer { * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve * @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve + * @RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR: Wide dynamic range */ enum rkisp1_ext_params_block_type { RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS, @@ -1081,11 +1155,15 @@ enum rkisp1_ext_params_block_type { RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS, RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND, RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS, + RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR, }; #define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0) #define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1) +/* A bitmask of parameter blocks supported on the current hardware. */ +#define RKISP1_CID_SUPPORTED_PARAMS_BLOCKS (V4L2_CID_USER_RKISP1_BASE + 0x01) + /** * struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block * header @@ -1460,6 +1538,23 @@ struct rkisp1_ext_params_compand_curve_config { struct rkisp1_cif_isp_compand_curve_config config; } __attribute__((aligned(8))); +/** + * struct rkisp1_ext_params_wdr_config - RkISP1 extensible params + * Wide dynamic range config + * + * RkISP1 extensible parameters WDR block. + * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR` + * + * @header: The RkISP1 extensible parameters header, see + * :c:type:`rkisp1_ext_params_block_header` + * @config: WDR configuration, see + * :c:type:`rkisp1_cif_isp_wdr_config` + */ +struct rkisp1_ext_params_wdr_config { + struct rkisp1_ext_params_block_header header; + struct rkisp1_cif_isp_wdr_config config; +} __attribute__((aligned(8))); + /* * The rkisp1_ext_params_compand_curve_config structure is counted twice as it * is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types. @@ -1484,7 +1579,8 @@ struct rkisp1_ext_params_compand_curve_config { sizeof(struct rkisp1_ext_params_afc_config) +\ sizeof(struct rkisp1_ext_params_compand_bls_config) +\ sizeof(struct rkisp1_ext_params_compand_curve_config) +\ - sizeof(struct rkisp1_ext_params_compand_curve_config)) + sizeof(struct rkisp1_ext_params_compand_curve_config) +\ + sizeof(struct rkisp1_ext_params_wdr_config)) /** * enum rksip1_ext_param_buffer_version - RkISP1 extensible parameters version @@ -1520,6 +1616,14 @@ enum rksip1_ext_param_buffer_version { * V4L2 control. If such a control is not available, userspace should assume only * RKISP1_EXT_PARAM_BUFFER_V1 is supported by the driver. * + * The read-only V4L2 control ``RKISP1_CID_SUPPORTED_PARAMS_BLOCKS`` can be used + * to query the blocks supported by the device. It contains a bitmask where each + * bit represents the availability of the corresponding entry from the + * :c:type:`rkisp1_ext_params_block_type` enum. The current and default values + * of the control represent the blocks supported by the device instance, while + * the maximum value represents the blocks supported by the kernel driver, + * independently of the device instance.
+ * * For each ISP block that userspace wants to configure, a block-specific * structure is appended to the @data buffer, one after the other without gaps * or overlaps in between. Userspace shall populate the @data_size field with diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h index 72e32814ea83..f836512e9deb 100644 --- a/include/uapi/linux/v4l2-controls.h +++ b/include/uapi/linux/v4l2-controls.h @@ -222,6 +222,12 @@ enum v4l2_colorfx { */ #define V4L2_CID_USER_UVC_BASE (V4L2_CID_USER_BASE + 0x11e0) +/* + * The base for Rockchip ISP1 driver controls. + * We reserve 16 controls for this driver. + */ +#define V4L2_CID_USER_RKISP1_BASE (V4L2_CID_USER_BASE + 0x1220) + /* MPEG-class control IDs */ /* The MPEG controls are applicable to all codec controls * and the 'MPEG' part of the define is historical */ diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h index 5764f315137f..75100bf009ba 100644 --- a/include/uapi/linux/vfio.h +++ b/include/uapi/linux/vfio.h @@ -905,10 +905,12 @@ struct vfio_device_feature { * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18, * struct vfio_device_bind_iommufd) * @argsz: User filled size of this data. - * @flags: Must be 0. + * @flags: Must be 0 or a combination of VFIO_DEVICE_BIND_* flags * @iommufd: iommufd to bind. * @out_devid: The device id generated by this bind. devid is a handle for * this device/iommufd bond and can be used in IOMMUFD commands. + * @token_uuid_ptr: Valid if VFIO_DEVICE_BIND_FLAG_TOKEN. Points to a 16 byte + * UUID in the same format as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN. * * Bind a vfio_device to the specified iommufd. * * Unbind is automatically conducted when device fd is closed. * + * A token is sometimes required to open the device. Unless this is known to be + * needed, VFIO_DEVICE_BIND_FLAG_TOKEN should not be set and token_uuid_ptr is + * ignored. The only case today is a PF/VF relationship where the VF bind must + * be provided the same token as VFIO_DEVICE_FEATURE_PCI_VF_TOKEN provided to + * the PF. + * * Return: 0 on success, -errno on failure. */ struct vfio_device_bind_iommufd { __u32 argsz; __u32 flags; +#define VFIO_DEVICE_BIND_FLAG_TOKEN (1 << 0) __s32 iommufd; __u32 out_devid; + __aligned_u64 token_uuid_ptr; }; #define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18) diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h index d6ad01fbb8d2..283348b64af9 100644 --- a/include/uapi/linux/vhost.h +++ b/include/uapi/linux/vhost.h @@ -242,4 +242,32 @@ #define VHOST_SET_FEATURES_ARRAY _IOW(VHOST_VIRTIO, 0x83, \ struct vhost_features_array) +/* fork_owner values for vhost */ +#define VHOST_FORK_OWNER_KTHREAD 0 +#define VHOST_FORK_OWNER_TASK 1 + +/** + * VHOST_SET_FORK_FROM_OWNER - Set the fork_owner flag for the vhost device. + * This ioctl must be called before VHOST_SET_OWNER. + * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y + * + * @param fork_owner: An 8-bit value that determines the vhost thread mode + * + * When fork_owner is set to VHOST_FORK_OWNER_TASK (the default value): + * - Vhost will create vhost workers as tasks forked from the owner, + * inheriting all of the owner's attributes. + * + * When fork_owner is set to VHOST_FORK_OWNER_KTHREAD: + * - Vhost will create vhost workers as kernel threads. + */ +#define VHOST_SET_FORK_FROM_OWNER _IOW(VHOST_VIRTIO, 0x83, __u8) + +/** + * VHOST_GET_FORK_FROM_OWNER - Get the current fork_owner flag for the vhost device.
+ * Only available when CONFIG_VHOST_ENABLE_FORK_OWNER_CONTROL=y + * + * @return: An 8-bit value indicating the current thread mode. + */ +#define VHOST_GET_FORK_FROM_OWNER _IOR(VHOST_VIRTIO, 0x84, __u8) + #endif diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index 9e3b366d5fc7..3dd9fa45dde1 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -726,7 +726,7 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */ #define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */ #define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */ - /* 12bit raw bayer packed, 6 bytes for every 4 pixels */ + /* 12bit raw bayer packed, 3 bytes for every 2 pixels */ #define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C') #define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C') #define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C') @@ -840,6 +840,12 @@ struct v4l2_pix_format { #define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */ #define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */ +/* Renesas RZ/V2H CRU packed formats. 64-bit units with contiguous pixels */ +#define V4L2_PIX_FMT_RAW_CRU10 v4l2_fourcc('C', 'R', '1', '0') +#define V4L2_PIX_FMT_RAW_CRU12 v4l2_fourcc('C', 'R', '1', '2') +#define V4L2_PIX_FMT_RAW_CRU14 v4l2_fourcc('C', 'R', '1', '4') +#define V4L2_PIX_FMT_RAW_CRU20 v4l2_fourcc('C', 'R', '2', '0') + /* SDR formats - used only for Software Defined Radio devices */ #define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */ #define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */ @@ -861,6 +867,7 @@ struct v4l2_pix_format { #define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */ #define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */ #define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */ +#define V4L2_META_FMT_UVC_MSXU_1_5 v4l2_fourcc('U', 'V', 'C', 'M') /* UVC MSXU metadata */ #define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */ /* Vendor specific - used for RK_ISP1 camera sub-system */ diff --git a/include/uapi/linux/vt.h b/include/uapi/linux/vt.h index b60fcdfb2746..714483d68c69 100644 --- a/include/uapi/linux/vt.h +++ b/include/uapi/linux/vt.h @@ -14,9 +14,9 @@ /* Note: the ioctl VT_GETSTATE does not work for consoles 16 and higher (since it returns a short) */ -/* 'V' to avoid collision with termios and kd */ +/* 0x56 is 'V', to avoid collision with termios and kd */ -#define VT_OPENQRY _IO('V', 0x00) /* find available vt */ +#define VT_OPENQRY 0x5600 /* find available vt */ struct vt_mode { __u8 mode; /* vt mode */ @@ -25,8 +25,8 @@ struct vt_mode { __s16 acqsig; /* signal to raise on acquisition */ __s16 frsig; /* unused (set to 0) */ }; -#define VT_GETMODE _IO('V', 0x01) /* get mode of active vt */ -#define VT_SETMODE _IO('V', 0x02) /* set mode of active vt */ +#define VT_GETMODE 0x5601 /* get mode of active vt */ +#define VT_SETMODE 0x5602 /* set mode of active vt */ #define VT_AUTO 0x00 /* auto vt switching */ #define VT_PROCESS 0x01 /* process controls switching */ #define VT_ACKACQ 0x02 /* acknowledge switch */ @@ -36,21 +36,21 @@ struct vt_stat { __u16 v_signal; /* signal to send */ __u16 v_state; /* vt bitmask */ }; 
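A quick sanity check on the vt.h renumbering in the surrounding hunks: the raw 0x56nn constants are the historical ABI values, and they coincide with what _IO('V', nn) produces under the asm-generic ioctl encoding, where _IOC_NONE is 0 and the type byte sits in bits 8..15. On architectures that define _IOC_NONE differently, the _IO() spelling would have produced different numbers, which is presumably why the header returns to plain constants. A small compile-time check of the equivalence under that stated assumption:

#include <linux/ioctl.h>

#if _IOC_NONE == 0U	/* asm-generic encoding only; not true on every arch */
_Static_assert(_IO('V', 0x00) == 0x5600, "VT_OPENQRY keeps its ABI value");
_Static_assert(_IO('V', 0x0F) == 0x560F, "VT_SETACTIVATE keeps its ABI value");
#endif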
-#define VT_GETSTATE _IO('V', 0x03) /* get global vt state info */ -#define VT_SENDSIG _IO('V', 0x04) /* signal to send to bitmask of vts */ +#define VT_GETSTATE 0x5603 /* get global vt state info */ +#define VT_SENDSIG 0x5604 /* signal to send to bitmask of vts */ -#define VT_RELDISP _IO('V', 0x05) /* release display */ +#define VT_RELDISP 0x5605 /* release display */ -#define VT_ACTIVATE _IO('V', 0x06) /* make vt active */ -#define VT_WAITACTIVE _IO('V', 0x07) /* wait for vt active */ -#define VT_DISALLOCATE _IO('V', 0x08) /* free memory associated to vt */ +#define VT_ACTIVATE 0x5606 /* make vt active */ +#define VT_WAITACTIVE 0x5607 /* wait for vt active */ +#define VT_DISALLOCATE 0x5608 /* free memory associated to vt */ struct vt_sizes { __u16 v_rows; /* number of rows */ __u16 v_cols; /* number of columns */ __u16 v_scrollsize; /* number of lines of scrollback */ }; -#define VT_RESIZE _IO('V', 0x09) /* set kernel's idea of screensize */ +#define VT_RESIZE 0x5609 /* set kernel's idea of screensize */ struct vt_consize { __u16 v_rows; /* number of rows */ @@ -60,10 +60,10 @@ struct vt_consize { __u16 v_vcol; /* number of pixel columns on screen */ __u16 v_ccol; /* number of pixel columns per character */ }; -#define VT_RESIZEX _IO('V', 0x0A) /* set kernel's idea of screensize + more */ -#define VT_LOCKSWITCH _IO('V', 0x0B) /* disallow vt switching */ -#define VT_UNLOCKSWITCH _IO('V', 0x0C) /* allow vt switching */ -#define VT_GETHIFONTMASK _IO('V', 0x0D) /* return hi font mask */ +#define VT_RESIZEX 0x560A /* set kernel's idea of screensize + more */ +#define VT_LOCKSWITCH 0x560B /* disallow vt switching */ +#define VT_UNLOCKSWITCH 0x560C /* allow vt switching */ +#define VT_GETHIFONTMASK 0x560D /* return hi font mask */ struct vt_event { __u32 event; @@ -77,14 +77,14 @@ struct vt_event { __u32 pad[4]; /* Padding for expansion */ }; -#define VT_WAITEVENT _IO('V', 0x0E) /* Wait for an event */ +#define VT_WAITEVENT 0x560E /* Wait for an event */ struct vt_setactivate { __u32 console; struct vt_mode mode; }; -#define VT_SETACTIVATE _IO('V', 0x0F) /* Activate and set the mode of a console */ +#define VT_SETACTIVATE 0x560F /* Activate and set the mode of a console */ /* get console size and cursor position */ struct vt_consizecsrpos { diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h index 11b94b0b035b..98b71b9979f8 100644 --- a/include/uapi/rdma/efa-abi.h +++ b/include/uapi/rdma/efa-abi.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */ /* - * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef EFA_ABI_USER_H @@ -131,6 +131,7 @@ enum { EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4, EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5, EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV = 1 << 6, + EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM = 1 << 7, }; struct efa_ibv_ex_query_device_resp { diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h index ac7b162611ed..de6f5a94f1e3 100644 --- a/include/uapi/rdma/ib_user_ioctl_cmds.h +++ b/include/uapi/rdma/ib_user_ioctl_cmds.h @@ -55,6 +55,7 @@ enum uverbs_default_objects { UVERBS_OBJECT_DM, UVERBS_OBJECT_COUNTERS, UVERBS_OBJECT_ASYNC_EVENT, + UVERBS_OBJECT_DMAH, }; enum { @@ -105,6 +106,10 @@ enum uverbs_attrs_create_cq_cmd_attr_ids { UVERBS_ATTR_CREATE_CQ_FLAGS, UVERBS_ATTR_CREATE_CQ_RESP_CQE, UVERBS_ATTR_CREATE_CQ_EVENT_FD, + UVERBS_ATTR_CREATE_CQ_BUFFER_VA, + UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH, + UVERBS_ATTR_CREATE_CQ_BUFFER_FD, + UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET, }; enum uverbs_attrs_destroy_cq_cmd_attr_ids { @@ -236,6 +241,22 @@ enum uverbs_methods_dm { UVERBS_METHOD_DM_FREE, }; +enum uverbs_attrs_alloc_dmah_cmd_attr_ids { + UVERBS_ATTR_ALLOC_DMAH_HANDLE, + UVERBS_ATTR_ALLOC_DMAH_CPU_ID, + UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE, + UVERBS_ATTR_ALLOC_DMAH_PH, +}; + +enum uverbs_attrs_free_dmah_cmd_attr_ids { + UVERBS_ATTR_FREE_DMA_HANDLE, +}; + +enum uverbs_methods_dmah { + UVERBS_METHOD_DMAH_ALLOC, + UVERBS_METHOD_DMAH_FREE, +}; + enum uverbs_attrs_reg_dm_mr_cmd_attr_ids { UVERBS_ATTR_REG_DM_MR_HANDLE, UVERBS_ATTR_REG_DM_MR_OFFSET, @@ -253,6 +274,7 @@ enum uverbs_methods_mr { UVERBS_METHOD_ADVISE_MR, UVERBS_METHOD_QUERY_MR, UVERBS_METHOD_REG_DMABUF_MR, + UVERBS_METHOD_REG_MR, }; enum uverbs_attrs_mr_destroy_ids { @@ -286,6 +308,20 @@ enum uverbs_attrs_reg_dmabuf_mr_cmd_attr_ids { UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY, }; +enum uverbs_attrs_reg_mr_cmd_attr_ids { + UVERBS_ATTR_REG_MR_HANDLE, + UVERBS_ATTR_REG_MR_PD_HANDLE, + UVERBS_ATTR_REG_MR_DMA_HANDLE, + UVERBS_ATTR_REG_MR_IOVA, + UVERBS_ATTR_REG_MR_ADDR, + UVERBS_ATTR_REG_MR_LENGTH, + UVERBS_ATTR_REG_MR_ACCESS_FLAGS, + UVERBS_ATTR_REG_MR_FD, + UVERBS_ATTR_REG_MR_FD_OFFSET, + UVERBS_ATTR_REG_MR_RESP_LKEY, + UVERBS_ATTR_REG_MR_RESP_RKEY, +}; + enum uverbs_attrs_create_counters_cmd_attr_ids { UVERBS_ATTR_CREATE_COUNTERS_HANDLE, }; diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h index c0c59a8f7256..72fd385037a6 100644 --- a/include/ufs/ufs.h +++ b/include/ufs/ufs.h @@ -182,6 +182,11 @@ enum attr_idn { QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F, QUERY_ATTR_IDN_TIMESTAMP = 0x30, QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID = 0x34, + QUERY_ATTR_IDN_HID_DEFRAG_OPERATION = 0x35, + QUERY_ATTR_IDN_HID_AVAILABLE_SIZE = 0x36, + QUERY_ATTR_IDN_HID_SIZE = 0x37, + QUERY_ATTR_IDN_HID_PROGRESS_RATIO = 0x38, + QUERY_ATTR_IDN_HID_STATE = 0x39, QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT = 0x3C, QUERY_ATTR_IDN_WB_BUF_RESIZE_EN = 0x3D, QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS = 0x3E, @@ -401,6 +406,7 @@ enum { UFS_DEV_HPB_SUPPORT = BIT(7), UFS_DEV_WRITE_BOOSTER_SUP = BIT(8), UFS_DEV_LVL_EXCEPTION_SUP = BIT(12), + UFS_DEV_HID_SUPPORT = BIT(13), }; #define UFS_DEV_HPB_SUPPORT_VERSION 0x310 @@ -466,6 +472,24 @@ enum ufs_ref_clk_freq { REF_CLK_FREQ_INVAL = -1, }; +/* bDefragOperation attribute values */ +enum ufs_hid_defrag_operation { + HID_ANALYSIS_AND_DEFRAG_DISABLE = 0, + HID_ANALYSIS_ENABLE = 1, + HID_ANALYSIS_AND_DEFRAG_ENABLE = 2, +}; + +/* bHIDState attribute values */ +enum ufs_hid_state { + HID_IDLE = 0, + ANALYSIS_IN_PROGRESS = 1, + DEFRAG_REQUIRED = 2, + DEFRAG_IN_PROGRESS = 3, + 
DEFRAG_COMPLETED = 4, + DEFRAG_NOT_REQUIRED = 5, + NUM_UFS_HID_STATES = 6, +}; + /* bWriteBoosterBufferResizeEn attribute */ enum wb_resize_en { WB_RESIZE_EN_IDLE = 0, @@ -625,6 +649,8 @@ struct ufs_dev_info { u32 rtc_update_period; u8 rtt_cap; /* bDeviceRTTCap */ + + bool hid_sup; }; /* diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h index 9b3515cee711..1d3943777584 100644 --- a/include/ufs/ufshcd.h +++ b/include/ufs/ufshcd.h @@ -1480,6 +1480,7 @@ void ufshcd_resume_complete(struct device *dev); bool ufshcd_is_hba_active(struct ufs_hba *hba); void ufshcd_pm_qos_init(struct ufs_hba *hba); void ufshcd_pm_qos_exit(struct ufs_hba *hba); +int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr); /* Wrapper functions for safely calling variant operations */ static inline int ufshcd_vops_init(struct ufs_hba *hba) diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h index 1c23e6387f13..7dab04cf4a36 100644 --- a/include/xen/xenbus.h +++ b/include/xen/xenbus.h @@ -178,7 +178,7 @@ int xenbus_printf(struct xenbus_transaction t, * sprintf-style type string, and pointer. Returns 0 or errno.*/ int xenbus_gather(struct xenbus_transaction t, const char *dir, ...); -/* notifer routines for when the xenstore comes up */ +/* notifier routines for when the xenstore comes up */ extern int xenstored_ready; int register_xenstore_notifier(struct notifier_block *nb); void unregister_xenstore_notifier(struct notifier_block *nb);
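The HID (host-initiated defrag) additions in ufs.h above imply a two-phase flow: request analysis via bDefragOperation, read bHIDState until the analysis settles, and only then request defragmentation. A rough kernel-side sketch under stated assumptions: hid_read_attr()/hid_write_attr() are hypothetical stand-ins for the driver's real attribute query path, and a real implementation would sleep between polls and honor the UFS_DEV_HID_SUPPORT / hid_sup gate first.

#include <linux/errno.h>
#include <ufs/ufs.h>	/* QUERY_ATTR_IDN_HID_* and the ufs_hid_* enums above */

/* Hypothetical helpers; stand-ins for the driver's query machinery. */
extern int hid_read_attr(enum attr_idn idn, u32 *val);
extern int hid_write_attr(enum attr_idn idn, u32 val);

static int ufs_hid_try_defrag(void)
{
	u32 state;

	/* Phase 1: analysis only (bDefragOperation = HID_ANALYSIS_ENABLE). */
	if (hid_write_attr(QUERY_ATTR_IDN_HID_DEFRAG_OPERATION,
			   HID_ANALYSIS_ENABLE))
		return -EIO;
	do {
		if (hid_read_attr(QUERY_ATTR_IDN_HID_STATE, &state))
			return -EIO;
	} while (state == ANALYSIS_IN_PROGRESS);

	if (state != DEFRAG_REQUIRED)
		return 0;	/* idle, not required, or already completed */

	/* Phase 2: analysis + defrag (bDefragOperation = 2). */
	if (hid_write_attr(QUERY_ATTR_IDN_HID_DEFRAG_OPERATION,
			   HID_ANALYSIS_AND_DEFRAG_ENABLE))
		return -EIO;
	do {
		if (hid_read_attr(QUERY_ATTR_IDN_HID_STATE, &state))
			return -EIO;
	} while (state == DEFRAG_IN_PROGRESS || state == ANALYSIS_IN_PROGRESS);

	return state == DEFRAG_COMPLETED ? 0 : -EAGAIN;
}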