Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/codetag.lds.h | 16
-rw-r--r--  include/asm-generic/hugetlb.h | 17
-rw-r--r--  include/asm-generic/msi.h | 1
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 2
-rw-r--r--  include/crypto/engine.h | 1
-rw-r--r--  include/crypto/internal/acompress.h | 5
-rw-r--r--  include/crypto/internal/engine.h | 15
-rw-r--r--  include/crypto/internal/hash.h | 36
-rw-r--r--  include/drm/amd/isp.h | 51
-rw-r--r--  include/drm/display/drm_dp.h | 24
-rw-r--r--  include/drm/display/drm_dp_helper.h | 17
-rw-r--r--  include/drm/display/drm_hdmi_audio_helper.h | 1
-rw-r--r--  include/drm/display/drm_hdmi_cec_helper.h | 72
-rw-r--r--  include/drm/drm_accel.h | 5
-rw-r--r--  include/drm/drm_bridge.h | 375
-rw-r--r--  include/drm/drm_color_mgmt.h | 27
-rw-r--r--  include/drm/drm_connector.h | 60
-rw-r--r--  include/drm/drm_debugfs.h | 11
-rw-r--r--  include/drm/drm_device.h | 11
-rw-r--r--  include/drm/drm_drv.h | 22
-rw-r--r--  include/drm/drm_edid.h | 8
-rw-r--r--  include/drm/drm_file.h | 7
-rw-r--r--  include/drm/drm_format_helper.h | 19
-rw-r--r--  include/drm/drm_fourcc.h | 3
-rw-r--r--  include/drm/drm_gem.h | 13
-rw-r--r--  include/drm/drm_gem_framebuffer_helper.h | 6
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 11
-rw-r--r--  include/drm/drm_gem_vram_helper.h | 2
-rw-r--r--  include/drm/drm_gpusvm.h | 101
-rw-r--r--  include/drm/drm_gpuvm.h | 8
-rw-r--r--  include/drm/drm_managed.h | 15
-rw-r--r--  include/drm/drm_mipi_dsi.h | 2
-rw-r--r--  include/drm/drm_mode_config.h | 3
-rw-r--r--  include/drm/drm_modeset_helper.h | 2
-rw-r--r--  include/drm/drm_pagemap.h | 135
-rw-r--r--  include/drm/drm_panic.h | 6
-rw-r--r--  include/drm/drm_prime.h | 3
-rw-r--r--  include/drm/gpu_scheduler.h | 40
-rw-r--r--  include/drm/intel/pciids.h | 12
-rw-r--r--  include/drm/ttm/ttm_bo.h | 73
-rw-r--r--  include/drm/ttm/ttm_device.h | 1
-rw-r--r--  include/dt-bindings/clock/nxp,imx94-clock.h | 13
-rw-r--r--  include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h | 16
-rw-r--r--  include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h | 22
-rw-r--r--  include/dt-bindings/clock/qcom,milos-camcc.h | 131
-rw-r--r--  include/dt-bindings/clock/qcom,milos-dispcc.h | 61
-rw-r--r--  include/dt-bindings/clock/qcom,milos-gcc.h | 210
-rw-r--r--  include/dt-bindings/clock/qcom,milos-gpucc.h | 56
-rw-r--r--  include/dt-bindings/clock/qcom,milos-videocc.h | 36
-rw-r--r--  include/dt-bindings/clock/qcom,qcs615-camcc.h | 110
-rw-r--r--  include/dt-bindings/clock/qcom,qcs615-dispcc.h | 52
-rw-r--r--  include/dt-bindings/clock/qcom,qcs615-gpucc.h | 39
-rw-r--r--  include/dt-bindings/clock/qcom,qcs615-videocc.h | 30
-rw-r--r--  include/dt-bindings/clock/qcom,x1e80100-gcc.h | 2
-rw-r--r--  include/dt-bindings/clock/r9a07g043-cpg.h | 53
-rw-r--r--  include/dt-bindings/clock/r9a07g044-cpg.h | 58
-rw-r--r--  include/dt-bindings/clock/r9a07g054-cpg.h | 58
-rw-r--r--  include/dt-bindings/clock/r9a08g045-cpg.h | 71
-rw-r--r--  include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h | 29
-rw-r--r--  include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h | 29
-rw-r--r--  include/dt-bindings/clock/samsung,exynosautov920.h | 9
-rw-r--r--  include/dt-bindings/power/qcom-rpmpd.h | 1
-rw-r--r--  include/kvm/arm_vgic.h | 11
-rw-r--r--  include/linux/adi-axi-common.h | 56
-rw-r--r--  include/linux/alloc_tag.h | 6
-rw-r--r--  include/linux/amd-iommu.h | 25
-rw-r--r--  include/linux/balloon_compaction.h | 90
-rw-r--r--  include/linux/bcm47xx_nvram.h | 1
-rw-r--r--  include/linux/bcm47xx_sprom.h | 2
-rw-r--r--  include/linux/bitfield.h | 8
-rw-r--r--  include/linux/bits.h | 29
-rw-r--r--  include/linux/cgroup-defs.h | 21
-rw-r--r--  include/linux/clk-provider.h | 26
-rw-r--r--  include/linux/codetag.h | 1
-rw-r--r--  include/linux/coredump.h | 2
-rw-r--r--  include/linux/cpumask.h | 40
-rw-r--r--  include/linux/crypto.h | 3
-rw-r--r--  include/linux/damon.h | 80
-rw-r--r--  include/linux/dax.h | 9
-rw-r--r--  include/linux/device-mapper.h | 2
-rw-r--r--  include/linux/dma-fence.h | 45
-rw-r--r--  include/linux/dmapool.h | 8
-rw-r--r--  include/linux/find.h | 29
-rw-r--r--  include/linux/fpga/adi-axi-common.h | 23
-rw-r--r--  include/linux/fprobe.h | 5
-rw-r--r--  include/linux/fs.h | 25
-rw-r--r--  include/linux/fsnotify.h | 35
-rw-r--r--  include/linux/ftrace.h | 2
-rw-r--r--  include/linux/gfp.h | 7
-rw-r--r--  include/linux/hid.h | 8
-rw-r--r--  include/linux/highmem-internal.h | 2
-rw-r--r--  include/linux/highmem.h | 12
-rw-r--r--  include/linux/hisi_acc_qm.h | 4
-rw-r--r--  include/linux/huge_mm.h | 52
-rw-r--r--  include/linux/hugetlb.h | 20
-rw-r--r--  include/linux/intel_dg_nvm_aux.h | 32
-rw-r--r--  include/linux/iommu.h | 74
-rw-r--r--  include/linux/iommufd.h | 196
-rw-r--r--  include/linux/irq-entry-common.h | 16
-rw-r--r--  include/linux/irqbypass.h | 46
-rw-r--r--  include/linux/irqchip/arm-gic-v4.h | 2
-rw-r--r--  include/linux/irqchip/arm-gic-v5.h | 394
-rw-r--r--  include/linux/irqchip/arm-vgic-info.h | 4
-rw-r--r--  include/linux/irqdomain.h | 3
-rw-r--r--  include/linux/kernel.h | 6
-rw-r--r--  include/linux/khugepaged.h | 4
-rw-r--r--  include/linux/ksm.h | 12
-rw-r--r--  include/linux/kvm_dirty_ring.h | 18
-rw-r--r--  include/linux/kvm_host.h | 36
-rw-r--r--  include/linux/kvm_irqfd.h | 5
-rw-r--r--  include/linux/led-class-flash.h | 2
-rw-r--r--  include/linux/leds.h | 1
-rw-r--r--  include/linux/libnvdimm.h | 15
-rw-r--r--  include/linux/llist.h | 6
-rw-r--r--  include/linux/maple_tree.h | 4
-rw-r--r--  include/linux/memcontrol.h | 44
-rw-r--r--  include/linux/memfd.h | 4
-rw-r--r--  include/linux/memory-tiers.h | 2
-rw-r--r--  include/linux/memory.h | 20
-rw-r--r--  include/linux/memory_hotplug.h | 3
-rw-r--r--  include/linux/mfd/davinci_voicecodec.h | 8
-rw-r--r--  include/linux/mfd/madera/pdata.h | 3
-rw-r--r--  include/linux/mfd/pcf50633/core.h | 229
-rw-r--r--  include/linux/mfd/rk808.h | 2
-rw-r--r--  include/linux/mfd/syscon/atmel-smc.h | 8
-rw-r--r--  include/linux/mfd/tps65219.h | 5
-rw-r--r--  include/linux/mfd/twl.h | 21
-rw-r--r--  include/linux/mfd/wm8350/core.h | 10
-rw-r--r--  include/linux/migrate.h | 46
-rw-r--r--  include/linux/mlx5/driver.h | 25
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 14
-rw-r--r--  include/linux/mm.h | 81
-rw-r--r--  include/linux/mm_types.h | 3
-rw-r--r--  include/linux/mman.h | 4
-rw-r--r--  include/linux/mmap_lock.h | 11
-rw-r--r--  include/linux/mmdebug.h | 12
-rw-r--r--  include/linux/mmzone.h | 36
-rw-r--r--  include/linux/module.h | 6
-rw-r--r--  include/linux/msi.h | 1
-rw-r--r--  include/linux/mtd/map.h | 13
-rw-r--r--  include/linux/mtd/spinand.h | 70
-rw-r--r--  include/linux/mtd/ubi.h | 1
-rw-r--r--  include/linux/node.h | 77
-rw-r--r--  include/linux/nodemask.h | 18
-rw-r--r--  include/linux/of_irq.h | 5
-rw-r--r--  include/linux/padata.h | 4
-rw-r--r--  include/linux/page-flags.h | 106
-rw-r--r--  include/linux/page-isolation.h | 47
-rw-r--r--  include/linux/page_owner.h | 8
-rw-r--r--  include/linux/pageblock-flags.h | 56
-rw-r--r--  include/linux/pagemap.h | 14
-rw-r--r--  include/linux/pagewalk.h | 9
-rw-r--r--  include/linux/panic.h | 3
-rw-r--r--  include/linux/pci-tph.h | 1
-rw-r--r--  include/linux/percpu-defs.h | 7
-rw-r--r--  include/linux/pfn.h | 9
-rw-r--r--  include/linux/pfn_t.h | 131
-rw-r--r--  include/linux/pgtable.h | 118
-rw-r--r--  include/linux/platform_data/emc2305.h | 6
-rw-r--r--  include/linux/platform_data/video-pxafb.h | 1
-rw-r--r--  include/linux/power_supply.h | 16
-rw-r--r--  include/linux/printk.h | 7
-rw-r--r--  include/linux/proc_fs.h | 1
-rw-r--r--  include/linux/ring_buffer.h | 4
-rw-r--r--  include/linux/rmap.h | 4
-rw-r--r--  include/linux/rv.h | 86
-rw-r--r--  include/linux/sched.h | 15
-rw-r--r--  include/linux/sched/ext.h | 23
-rw-r--r--  include/linux/shmem_fs.h | 5
-rw-r--r--  include/linux/soc/qcom/ubwc.h | 75
-rw-r--r--  include/linux/spi/spi-mem.h | 2
-rw-r--r--  include/linux/suspend.h | 2
-rw-r--r--  include/linux/swap.h | 23
-rw-r--r--  include/linux/sysfb.h | 6
-rw-r--r--  include/linux/usb/uvc.h | 3
-rw-r--r--  include/linux/userfaultfd_k.h | 15
-rw-r--r--  include/linux/vmstat.h | 4
-rw-r--r--  include/linux/wait.h | 2
-rw-r--r--  include/linux/workqueue.h | 34
-rw-r--r--  include/linux/writeback.h | 11
-rw-r--r--  include/linux/zsmalloc.h | 2
-rw-r--r--  include/media/rcar-fcp.h | 5
-rw-r--r--  include/media/v4l2-ctrls.h | 4
-rw-r--r--  include/media/v4l2-dev.h | 14
-rw-r--r--  include/media/v4l2-ioctl.h | 1
-rw-r--r--  include/media/v4l2-jpeg.h | 9
-rw-r--r--  include/media/v4l2-subdev.h | 3
-rw-r--r--  include/media/vsp1.h | 89
-rw-r--r--  include/ras/ras_event.h | 2
-rw-r--r--  include/rdma/ib_umem.h | 25
-rw-r--r--  include/rdma/ib_verbs.h | 65
-rw-r--r--  include/rdma/restrack.h | 4
-rw-r--r--  include/rv/da_monitor.h | 172
-rw-r--r--  include/rv/ltl_monitor.h | 186
-rw-r--r--  include/scsi/scsi_device.h | 5
-rw-r--r--  include/scsi/scsi_transport_fc.h | 5
-rw-r--r--  include/soc/spacemit/k1-syscon.h | 160
-rw-r--r--  include/trace/events/alarmtimer.h | 2
-rw-r--r--  include/trace/events/btrfs.h | 7
-rw-r--r--  include/trace/events/cgroup.h | 47
-rw-r--r--  include/trace/events/damon.h | 41
-rw-r--r--  include/trace/events/dma_fence.h | 38
-rw-r--r--  include/trace/events/ext4.h | 50
-rw-r--r--  include/trace/events/fs_dax.h | 6
-rw-r--r--  include/trace/events/ipi.h | 58
-rw-r--r--  include/trace/events/kmem.h | 38
-rw-r--r--  include/trace/events/kvm.h | 111
-rw-r--r--  include/trace/events/mmap.h | 52
-rw-r--r--  include/trace/events/power.h | 28
-rw-r--r--  include/trace/events/sched.h | 14
-rw-r--r--  include/trace/events/scsi.h | 13
-rw-r--r--  include/trace/events/thp.h | 2
-rw-r--r--  include/trace/events/writeback.h | 8
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 2
-rw-r--r--  include/uapi/drm/drm_fourcc.h | 56
-rw-r--r--  include/uapi/drm/ivpu_accel.h | 14
-rw-r--r--  include/uapi/drm/msm_drm.h | 149
-rw-r--r--  include/uapi/drm/panfrost_drm.h | 21
-rw-r--r--  include/uapi/drm/panthor_drm.h | 41
-rw-r--r--  include/uapi/drm/xe_drm.h | 12
-rw-r--r--  include/uapi/linux/capability.h | 5
-rw-r--r--  include/uapi/linux/iommufd.h | 154
-rw-r--r--  include/uapi/linux/kvm.h | 2
-rw-r--r--  include/uapi/linux/media/raspberrypi/pisp_be_config.h | 9
-rw-r--r--  include/uapi/linux/rkisp1-config.h | 106
-rw-r--r--  include/uapi/linux/v4l2-controls.h | 6
-rw-r--r--  include/uapi/linux/videodev2.h | 9
-rw-r--r--  include/uapi/rdma/efa-abi.h | 3
-rw-r--r--  include/uapi/rdma/ib_user_ioctl_cmds.h | 36
-rw-r--r--  include/ufs/ufs.h | 26
-rw-r--r--  include/ufs/ufshcd.h | 1
-rw-r--r--  include/video/edid.h | 3
-rw-r--r--  include/video/sisfb.h | 6
-rw-r--r--  include/xen/xen-ops.h | 2
-rw-r--r--  include/xen/xenbus.h | 2
235 files changed, 5166 insertions(+), 2295 deletions(-)
diff --git a/include/asm-generic/codetag.lds.h b/include/asm-generic/codetag.lds.h
index 372c320c5043..a14f4bdafdda 100644
--- a/include/asm-generic/codetag.lds.h
+++ b/include/asm-generic/codetag.lds.h
@@ -2,6 +2,12 @@
#ifndef __ASM_GENERIC_CODETAG_LDS_H
#define __ASM_GENERIC_CODETAG_LDS_H
+#ifdef CONFIG_MEM_ALLOC_PROFILING
+#define IF_MEM_ALLOC_PROFILING(...) __VA_ARGS__
+#else
+#define IF_MEM_ALLOC_PROFILING(...)
+#endif
+
#define SECTION_WITH_BOUNDARIES(_name) \
. = ALIGN(8); \
__start_##_name = .; \
@@ -9,13 +15,7 @@
__stop_##_name = .;
#define CODETAG_SECTIONS() \
- SECTION_WITH_BOUNDARIES(alloc_tags)
-
-/*
- * Module codetags which aren't used after module unload, therefore have the
- * same lifespan as the module and can be safely unloaded with the module.
- */
-#define MOD_CODETAG_SECTIONS()
+ IF_MEM_ALLOC_PROFILING(SECTION_WITH_BOUNDARIES(alloc_tags))
#define MOD_SEPARATE_CODETAG_SECTION(_name) \
.codetag.##_name : { \
@@ -28,6 +28,6 @@
* unload them individually once unused.
*/
#define MOD_SEPARATE_CODETAG_SECTIONS() \
- MOD_SEPARATE_CODETAG_SECTION(alloc_tags)
+ IF_MEM_ALLOC_PROFILING(MOD_SEPARATE_CODETAG_SECTION(alloc_tags))
#endif /* __ASM_GENERIC_CODETAG_LDS_H */
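The new IF_MEM_ALLOC_PROFILING() macro follows a common config-gating idiom: a variadic macro expands to its arguments only when the Kconfig option is set, so call sites need no #ifdef blocks of their own. A minimal sketch of the same idiom, using a hypothetical CONFIG_FOO:

#ifdef CONFIG_FOO                       /* hypothetical option */
#define IF_FOO(...) __VA_ARGS__
#else
#define IF_FOO(...)
#endif

#define SETUP_TABLE() \
        IF_FOO(register_foo_entries())  /* compiles away when CONFIG_FOO=n */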
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index 3e0a8fe9b108..dcb8727f2b82 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -66,15 +66,6 @@ static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
}
#endif
-#ifndef __HAVE_ARCH_HUGETLB_FREE_PGD_RANGE
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- free_pgd_range(tlb, addr, end, floor, ceiling);
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pte, unsigned long sz)
@@ -114,14 +105,6 @@ static inline int huge_pte_none_mostly(pte_t pte)
}
#endif
-#ifndef __HAVE_ARCH_PREPARE_HUGEPAGE_RANGE
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- return 0;
-}
-#endif
-
#ifndef __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h
index 124c734ca5d9..92cca4b23f13 100644
--- a/include/asm-generic/msi.h
+++ b/include/asm-generic/msi.h
@@ -33,6 +33,7 @@ typedef struct msi_alloc_info {
/* Device generating MSIs is proxying for another device */
#define MSI_ALLOC_FLAGS_PROXY_DEVICE (1UL << 0)
+#define MSI_ALLOC_FLAGS_FIXED_MSG_DATA (1UL << 1)
#define GENERIC_MSI_DOMAIN_OPS 1
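A hypothetical irqchip could use the new flag to tell the allocator that the MSI message data is fixed and must not be rewritten; a sketch, assuming msi_alloc_info_t keeps its flags field:

static void foo_msi_prepare(msi_alloc_info_t *info)
{
        info->flags |= MSI_ALLOC_FLAGS_FIXED_MSG_DATA;
}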
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index fa5f19b8d53a..ae2d2359b79e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -167,7 +167,7 @@ defined(CONFIG_AUTOFDO_CLANG) || defined(CONFIG_PROPELLER_CLANG)
#define FTRACE_STUB_HACK
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
/*
* The ftrace call sites are logged to a section whose name depends on the
* compiler option used. A given kernel image will only use one, AKA
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 545dbefe3e13..2e60344437da 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -76,7 +76,6 @@ int crypto_engine_stop(struct crypto_engine *engine);
struct crypto_engine *crypto_engine_alloc_init(struct device *dev, bool rt);
struct crypto_engine *crypto_engine_alloc_init_and_set(struct device *dev,
bool retry_support,
- int (*cbk_do_batch)(struct crypto_engine *engine),
bool rt, int qlen);
void crypto_engine_exit(struct crypto_engine *engine);
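With the batch callback parameter dropped, callers pass one fewer argument. A minimal sketch of allocating an engine against the new signature (the surrounding driver context is hypothetical):

struct crypto_engine *engine;

engine = crypto_engine_alloc_init_and_set(dev, true /* retry_support */,
                                          false /* rt */, 128 /* qlen */);
if (!engine)
        return -ENOMEM;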
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index ffffd88bbbad..2d97440028ff 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -63,10 +63,7 @@ struct crypto_acomp_stream {
struct crypto_acomp_streams {
/* These must come first because of struct scomp_alg. */
void *(*alloc_ctx)(void);
- union {
- void (*free_ctx)(void *);
- void (*cfree_ctx)(const void *);
- };
+ void (*free_ctx)(void *);
struct crypto_acomp_stream __percpu *streams;
struct work_struct stream_work;
diff --git a/include/crypto/internal/engine.h b/include/crypto/internal/engine.h
index b6a4ea2240fc..f19ef376833f 100644
--- a/include/crypto/internal/engine.h
+++ b/include/crypto/internal/engine.h
@@ -21,7 +21,6 @@ struct device;
/*
* struct crypto_engine - crypto hardware engine
* @name: the engine name
- * @idling: the engine is entering idle state
* @busy: request pump is busy
* @running: the engine is on working
* @retry_support: indication that the hardware allows re-execution
@@ -31,14 +30,6 @@ struct device;
* @list: link with the global crypto engine list
* @queue_lock: spinlock to synchronise access to request queue
* @queue: the crypto queue of the engine
- * @prepare_crypt_hardware: a request will soon arrive from the queue
- * so the subsystem requests the driver to prepare the hardware
- * by issuing this call
- * @unprepare_crypt_hardware: there are currently no more requests on the
- * queue so the subsystem notifies the driver that it may relax the
- * hardware by issuing this call
- * @do_batch_requests: execute a batch of requests. Depends on multiple
- * requests support.
* @kworker: kthread worker struct for request pump
* @pump_requests: work struct for scheduling work to the request pump
* @priv_data: the engine private data
@@ -46,7 +37,6 @@ struct device;
*/
struct crypto_engine {
char name[ENGINE_NAME_LEN];
- bool idling;
bool busy;
bool running;
@@ -58,11 +48,6 @@ struct crypto_engine {
struct crypto_queue queue;
struct device *dev;
- int (*prepare_crypt_hardware)(struct crypto_engine *engine);
- int (*unprepare_crypt_hardware)(struct crypto_engine *engine);
- int (*do_batch_requests)(struct crypto_engine *engine);
-
-
struct kthread_worker *kworker;
struct kthread_work pump_requests;
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index 0f85c543f80b..6ec5f2f37ccb 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -30,6 +30,20 @@
__##name##_req, (req))
struct ahash_request;
+struct scatterlist;
+
+struct crypto_hash_walk {
+ const char *data;
+
+ unsigned int offset;
+ unsigned int flags;
+
+ struct page *pg;
+ unsigned int entrylen;
+
+ unsigned int total;
+ struct scatterlist *sg;
+};
struct ahash_instance {
void (*free)(struct ahash_instance *inst);
@@ -61,6 +75,15 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
+int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
+int crypto_hash_walk_first(struct ahash_request *req,
+ struct crypto_hash_walk *walk);
+
+static inline int crypto_hash_walk_last(struct crypto_hash_walk *walk)
+{
+ return !(walk->entrylen | walk->total);
+}
+
int crypto_register_ahash(struct ahash_alg *alg);
void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
@@ -91,6 +114,12 @@ static inline bool crypto_hash_alg_needs_key(struct hash_alg_common *alg)
!(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
}
+static inline bool crypto_hash_no_export_core(struct crypto_ahash *tfm)
+{
+ return crypto_hash_alg_common(tfm)->base.cra_flags &
+ CRYPTO_AHASH_ALG_NO_EXPORT_CORE;
+}
+
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
struct crypto_instance *inst,
const char *name, u32 type, u32 mask);
@@ -167,6 +196,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
tfm->reqsize = reqsize;
}
+static inline bool crypto_ahash_tested(struct crypto_ahash *tfm)
+{
+ struct crypto_tfm *tfm_base = crypto_ahash_tfm(tfm);
+
+ return tfm_base->__crt_alg->cra_flags & CRYPTO_ALG_TESTED;
+}
+
static inline void crypto_ahash_set_reqsize_dma(struct crypto_ahash *ahash,
unsigned int reqsize)
{
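The crypto_hash_walk helpers moved here support the canonical scatterlist walk used by ahash drivers. A sketch of that loop (foo_process() is a hypothetical stand-in for the driver's block processing):

static int foo_ahash_update(struct ahash_request *req)
{
        struct crypto_hash_walk walk;
        int nbytes;

        for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
             nbytes = crypto_hash_walk_done(&walk, 0)) {
                /* consume nbytes of mapped data at walk.data */
                foo_process(walk.data, nbytes);
        }
        return nbytes;    /* 0 on completion, negative on error */
}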
diff --git a/include/drm/amd/isp.h b/include/drm/amd/isp.h
new file mode 100644
index 000000000000..ec868288abf2
--- /dev/null
+++ b/include/drm/amd/isp.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright (C) 2025 Advanced Micro Devices, Inc. All rights reserved.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ */
+
+#ifndef __ISP_H__
+#define __ISP_H__
+
+#include <linux/types.h>
+
+struct device;
+
+struct isp_platform_data {
+ void *adev;
+ u32 asic_type;
+ resource_size_t base_rmmio_size;
+};
+
+int isp_user_buffer_alloc(struct device *dev, void *dmabuf,
+ void **buf_obj, u64 *buf_addr);
+
+void isp_user_buffer_free(void *buf_obj);
+
+int isp_kernel_buffer_alloc(struct device *dev, u64 size,
+ void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+void isp_kernel_buffer_free(void **buf_obj, u64 *gpu_addr, void **cpu_addr);
+
+#endif
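A minimal sketch of driving the new ISP buffer helpers declared above, with error paths abbreviated and the buffer size chosen arbitrarily:

void *buf_obj, *cpu_addr;
u64 gpu_addr;
int ret;

ret = isp_kernel_buffer_alloc(dev, SZ_4K, &buf_obj, &gpu_addr, &cpu_addr);
if (ret)
        return ret;
/* ... program the ISP with gpu_addr, fill the buffer via cpu_addr ... */
isp_kernel_buffer_free(&buf_obj, &gpu_addr, &cpu_addr);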
diff --git a/include/drm/display/drm_dp.h b/include/drm/display/drm_dp.h
index 3001c0b6e7bb..811e9238a77c 100644
--- a/include/drm/display/drm_dp.h
+++ b/include/drm/display/drm_dp.h
@@ -547,16 +547,28 @@
/* DFP Capability Extension */
#define DP_DFP_CAPABILITY_EXTENSION_SUPPORT 0x0a3 /* 2.0 */
-#define DP_PANEL_REPLAY_CAP 0x0b0 /* DP 2.0 */
+#define DP_PANEL_REPLAY_CAP_SUPPORT 0x0b0 /* DP 2.0 */
# define DP_PANEL_REPLAY_SUPPORT (1 << 0)
# define DP_PANEL_REPLAY_SU_SUPPORT (1 << 1)
# define DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT (1 << 2) /* eDP 1.5 */
-#define DP_PANEL_PANEL_REPLAY_CAPABILITY 0xb1
-# define DP_PANEL_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5)
-
-#define DP_PANEL_PANEL_REPLAY_X_GRANULARITY 0xb2
-#define DP_PANEL_PANEL_REPLAY_Y_GRANULARITY 0xb4
+#define DP_PANEL_REPLAY_CAP_SIZE 7
+
+#define DP_PANEL_REPLAY_CAP_CAPABILITY 0xb1
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT 1 /* DP 2.1a */
+# define DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_MASK (3 << DP_PANEL_REPLAY_DSC_DECODE_CAPABILITY_IN_PR_SHIFT)
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_SUPPORTED 0x00
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_FULL_FRAME_ONLY 0x01
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_NOT_SUPPORTED 0x02
+# define DP_DSC_DECODE_CAPABILITY_IN_PR_RESERVED 0x03
+# define DP_PANEL_REPLAY_ASYNC_VIDEO_TIMING_NOT_SUPPORTED_IN_PR (1 << 3)
+# define DP_PANEL_REPLAY_DSC_CRC_OF_MULTIPLE_SUS_SUPPORTED (1 << 4)
+# define DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED (1 << 5)
+# define DP_PANEL_REPLAY_SU_Y_GRANULARITY_EXTENDED_CAPABILITY_SUPPORTED (1 << 6)
+# define DP_PANEL_REPLAY_LINK_OFF_SUPPORTED_IN_PR_AFTER_ADAPTIVE_SYNC_SDP (1 << 7)
+
+#define DP_PANEL_REPLAY_CAP_X_GRANULARITY 0xb2
+#define DP_PANEL_REPLAY_CAP_Y_GRANULARITY 0xb4
/* Link Configuration */
#define DP_LINK_BW_SET 0x100
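A sketch of reading the renamed panel replay capability block with the existing drm_dp_dpcd_read() helper; pr_cap[1] corresponds to DP_PANEL_REPLAY_CAP_CAPABILITY (0xb1) since the read starts at 0xb0:

u8 pr_cap[DP_PANEL_REPLAY_CAP_SIZE];

if (drm_dp_dpcd_read(aux, DP_PANEL_REPLAY_CAP_SUPPORT,
                     pr_cap, sizeof(pr_cap)) == sizeof(pr_cap) &&
    (pr_cap[0] & DP_PANEL_REPLAY_SUPPORT)) {
        bool su_gran_required =
                pr_cap[1] & DP_PANEL_REPLAY_SU_GRANULARITY_REQUIRED;
        /* ... */
}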
diff --git a/include/drm/display/drm_dp_helper.h b/include/drm/display/drm_dp_helper.h
index e4ca35143ff9..87caa4f1fdb8 100644
--- a/include/drm/display/drm_dp_helper.h
+++ b/include/drm/display/drm_dp_helper.h
@@ -523,10 +523,16 @@ struct drm_dp_aux {
* @no_zero_sized: If the hw can't use zero sized transfers (NVIDIA)
*/
bool no_zero_sized;
+
+ /**
+ * @dpcd_probe_disabled: If probing before a DPCD access is disabled.
+ */
+ bool dpcd_probe_disabled;
};
int drm_dp_dpcd_probe(struct drm_dp_aux *aux, unsigned int offset);
void drm_dp_dpcd_set_powered(struct drm_dp_aux *aux, bool powered);
+void drm_dp_dpcd_set_probe(struct drm_dp_aux *aux, bool enable);
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
void *buffer, size_t size);
ssize_t drm_dp_dpcd_write(struct drm_dp_aux *aux, unsigned int offset,
@@ -837,6 +843,7 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
* @lsb_reg_used: Do we also write values to the DP_EDP_BACKLIGHT_BRIGHTNESS_LSB register?
* @aux_enable: Does the panel support the AUX enable cap?
* @aux_set: Does the panel support setting the brightness through AUX?
+ * @luminance_set: Does the panel support setting the brightness through AUX using luminance values?
*
* This structure contains various data about an eDP backlight, which can be populated by using
* drm_edp_backlight_init().
@@ -844,21 +851,23 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
struct drm_edp_backlight_info {
u8 pwmgen_bit_count;
u8 pwm_freq_pre_divider;
- u16 max;
+ u32 max;
bool lsb_reg_used : 1;
bool aux_enable : 1;
bool aux_set : 1;
+ bool luminance_set : 1;
};
int
drm_edp_backlight_init(struct drm_dp_aux *aux, struct drm_edp_backlight_info *bl,
+ u32 max_luminance,
u16 driver_pwm_freq_hz, const u8 edp_dpcd[EDP_DISPLAY_CTL_CAP_SIZE],
- u16 *current_level, u8 *current_mode);
+ u32 *current_level, u8 *current_mode, bool need_luminance);
int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level);
+ u32 level);
int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl,
- u16 level);
+ u32 level);
int drm_edp_backlight_disable(struct drm_dp_aux *aux, const struct drm_edp_backlight_info *bl);
#if IS_ENABLED(CONFIG_DRM_KMS_HELPER) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
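A sketch of a caller updated for the widened u32 brightness levels and the new luminance parameters; aux, max_luminance, driver_pwm_freq_hz and edp_dpcd are assumed to be in scope:

struct drm_edp_backlight_info bl;
u32 current_level;
u8 current_mode;
int ret;

ret = drm_edp_backlight_init(aux, &bl, max_luminance, driver_pwm_freq_hz,
                             edp_dpcd, &current_level, &current_mode,
                             false /* need_luminance */);
if (!ret)
        ret = drm_edp_backlight_enable(aux, &bl, current_level);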
diff --git a/include/drm/display/drm_hdmi_audio_helper.h b/include/drm/display/drm_hdmi_audio_helper.h
index c9a6faef4109..44d910bdc72d 100644
--- a/include/drm/display/drm_hdmi_audio_helper.h
+++ b/include/drm/display/drm_hdmi_audio_helper.h
@@ -14,6 +14,7 @@ int drm_connector_hdmi_audio_init(struct drm_connector *connector,
struct device *hdmi_codec_dev,
const struct drm_connector_hdmi_audio_funcs *funcs,
unsigned int max_i2s_playback_channels,
+ u64 i2s_formats,
bool spdif_playback,
int sound_dai_port);
void drm_connector_hdmi_audio_plugged_notify(struct drm_connector *connector,
diff --git a/include/drm/display/drm_hdmi_cec_helper.h b/include/drm/display/drm_hdmi_cec_helper.h
new file mode 100644
index 000000000000..fd8f4d2f02c1
--- /dev/null
+++ b/include/drm/display/drm_hdmi_cec_helper.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef DRM_DISPLAY_HDMI_CEC_HELPER
+#define DRM_DISPLAY_HDMI_CEC_HELPER
+
+#include <linux/types.h>
+
+struct drm_connector;
+
+struct cec_msg;
+struct device;
+
+struct drm_connector_hdmi_cec_funcs {
+ /**
+ * @init: perform hardware-specific initialization before registering the CEC adapter
+ */
+ int (*init)(struct drm_connector *connector);
+
+ /**
+ * @uninit: perform hardware-specific teardown for the CEC adapter
+ */
+ void (*uninit)(struct drm_connector *connector);
+
+ /**
+ * @enable: enable or disable CEC adapter
+ */
+ int (*enable)(struct drm_connector *connector, bool enable);
+
+ /**
+ * @log_addr: set adapter's logical address, can be called multiple
+ * times if adapter supports several LAs
+ */
+ int (*log_addr)(struct drm_connector *connector, u8 logical_addr);
+
+ /**
+ * @transmit: start transmission of the specified CEC message
+ */
+ int (*transmit)(struct drm_connector *connector, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+};
+
+int drmm_connector_hdmi_cec_register(struct drm_connector *connector,
+ const struct drm_connector_hdmi_cec_funcs *funcs,
+ const char *name,
+ u8 available_las,
+ struct device *dev);
+
+void drm_connector_hdmi_cec_received_msg(struct drm_connector *connector,
+ struct cec_msg *msg);
+
+void drm_connector_hdmi_cec_transmit_done(struct drm_connector *connector,
+ u8 status,
+ u8 arb_lost_cnt, u8 nack_cnt,
+ u8 low_drive_cnt, u8 error_cnt);
+
+void drm_connector_hdmi_cec_transmit_attempt_done(struct drm_connector *connector,
+ u8 status);
+
+#if IS_ENABLED(CONFIG_DRM_DISPLAY_HDMI_CEC_NOTIFIER_HELPER)
+int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev);
+#else
+static inline int drmm_connector_hdmi_cec_notifier_register(struct drm_connector *connector,
+ const char *port_name,
+ struct device *dev)
+{
+ return 0;
+}
+#endif
+
+#endif
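A hypothetical driver wiring up the new CEC helper would fill in the ops it supports and register them against its connector (foo_* names are placeholders):

static const struct drm_connector_hdmi_cec_funcs foo_cec_funcs = {
        .enable   = foo_cec_enable,
        .log_addr = foo_cec_log_addr,
        .transmit = foo_cec_transmit,
};

static int foo_connector_init(struct drm_connector *connector,
                              struct device *dev)
{
        return drmm_connector_hdmi_cec_register(connector, &foo_cec_funcs,
                                                "foo-cec", CEC_MAX_LOG_ADDRS,
                                                dev);
}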
diff --git a/include/drm/drm_accel.h b/include/drm/drm_accel.h
index 038ccb02f9a3..20a665ec6f16 100644
--- a/include/drm/drm_accel.h
+++ b/include/drm/drm_accel.h
@@ -58,7 +58,6 @@ void accel_core_exit(void);
int accel_core_init(void);
void accel_set_device_instance_params(struct device *kdev, int index);
int accel_open(struct inode *inode, struct file *filp);
-void accel_debugfs_init(struct drm_device *dev);
void accel_debugfs_register(struct drm_device *dev);
#else
@@ -77,10 +76,6 @@ static inline void accel_set_device_instance_params(struct device *kdev, int ind
{
}
-static inline void accel_debugfs_init(struct drm_device *dev)
-{
-}
-
static inline void accel_debugfs_register(struct drm_device *dev)
{
}
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 4e418a29a9ff..8ed80cad77ec 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -32,6 +32,7 @@
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
+struct cec_msg;
struct device_node;
struct drm_bridge;
@@ -77,6 +78,16 @@ struct drm_bridge_funcs {
enum drm_bridge_attach_flags flags);
/**
+ * @destroy:
+ *
+ * This callback is invoked when the bridge is about to be
+ * deallocated.
+ *
+ * The @destroy callback is optional.
+ */
+ void (*destroy)(struct drm_bridge *bridge);
+
+ /**
* @detach:
*
* This callback is invoked whenever our bridge is being detached from a
@@ -164,17 +175,33 @@ struct drm_bridge_funcs {
/**
* @disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @disable vfunc. If the preceding element is a &drm_encoder
- * it's called right before the &drm_encoder_helper_funcs.disable,
- * &drm_encoder_helper_funcs.prepare or &drm_encoder_helper_funcs.dpms
- * hook.
+ * The @disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms.
+ *
* The @disable callback is optional.
*
* NOTE:
@@ -187,17 +214,34 @@ struct drm_bridge_funcs {
/**
* @post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @post_disable function. If the preceding element is a &drm_encoder
- * it's called right after the encoder's
- * &drm_encoder_helper_funcs.disable, &drm_encoder_helper_funcs.prepare
- * or &drm_encoder_helper_funcs.dpms hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @post_disable callback is optional.
*
@@ -240,18 +284,30 @@ struct drm_bridge_funcs {
/**
* @pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @pre_enable function. If the preceding element is a
- * &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @pre_enable callback is optional.
*
@@ -265,19 +321,31 @@ struct drm_bridge_funcs {
/**
* @enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @enable function. If the preceding element is a
- * &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.enable, &drm_encoder_helper_funcs.commit or
- * &drm_encoder_helper_funcs.dpms hook.
+ * The @enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - drm_encoder_helper_funcs.commit
+ *
* The @enable callback is optional.
*
* NOTE:
@@ -290,17 +358,30 @@ struct drm_bridge_funcs {
/**
* @atomic_pre_enable:
*
- * This callback should enable the bridge. It is called right before
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_pre_enable or @pre_enable function. If the preceding
- * element is a &drm_encoder it's called right before the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
- *
* The display pipe (i.e. clocks and timing signals) feeding this bridge
- * will not yet be running when this callback is called. The bridge must
- * not enable the display link feeding the next bridge in the chain (if
- * there is one) when this callback is called.
+ * will not yet be running when the @atomic_pre_enable is called.
+ *
+ * This callback should perform all the necessary actions to prepare the
+ * bridge to accept signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is pre-enabled (unless marked otherwise by
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.pre_enable
+ * - &drm_bridge_funcs.atomic_pre_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - &drm_encoder_helper_funcs.commit
*
* The @atomic_pre_enable callback is optional.
*/
@@ -310,18 +391,31 @@ struct drm_bridge_funcs {
/**
* @atomic_enable:
*
- * This callback should enable the bridge. It is called right after
- * the preceding element in the display pipe is enabled. If the
- * preceding element is a bridge this means it's called after that
- * bridge's @atomic_enable or @enable function. If the preceding element
- * is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_enable hook.
+ * The @atomic_enable callback should enable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is running when this callback is called. This
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is enabled via one of:
+ *
+ * - &drm_bridge_funcs.enable
+ * - &drm_bridge_funcs.atomic_enable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the CRTC is enabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.atomic_enable
+ * - &drm_crtc_helper_funcs.commit
+ *
+ * and the encoder is enabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_enable
+ * - &drm_encoder_helper_funcs.enable
+ * - drm_encoder_helper_funcs.commit
+ *
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
@@ -329,16 +423,32 @@ struct drm_bridge_funcs {
/**
* @atomic_disable:
*
- * This callback should disable the bridge. It is called right before
- * the preceding element in the display pipe is disabled. If the
- * preceding element is a bridge this means it's called before that
- * bridge's @atomic_disable or @disable vfunc. If the preceding element
- * is a &drm_encoder it's called right before the
- * &drm_encoder_helper_funcs.atomic_disable hook.
+ * The @atomic_disable callback should disable the bridge.
*
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
+ * If the preceding element is a &drm_bridge, then this is called before
+ * that bridge is disabled via one of:
+ *
+ * - &drm_bridge_funcs.disable
+ * - &drm_bridge_funcs.atomic_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called before the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms.
+ *
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
@@ -347,16 +457,34 @@ struct drm_bridge_funcs {
/**
* @atomic_post_disable:
*
- * This callback should disable the bridge. It is called right after the
- * preceding element in the display pipe is disabled. If the preceding
- * element is a bridge this means it's called after that bridge's
- * @atomic_post_disable or @post_disable function. If the preceding
- * element is a &drm_encoder it's called right after the encoder's
- * &drm_encoder_helper_funcs.atomic_disable hook.
- *
* The bridge must assume that the display pipe (i.e. clocks and timing
- * signals) feeding it is no longer running when this callback is
- * called.
+ * signals) feeding this bridge is no longer running when the
+ * @atomic_post_disable is called.
+ *
+ * This callback should perform all the actions required by the hardware
+ * after it has stopped receiving signals from the preceding element.
+ *
+ * If the preceding element is a &drm_bridge, then this is called after
+ * that bridge is post-disabled (unless marked otherwise by the
+ * @pre_enable_prev_first flag) via one of:
+ *
+ * - &drm_bridge_funcs.post_disable
+ * - &drm_bridge_funcs.atomic_post_disable
+ *
+ * If the preceding element of the bridge is a display controller, then
+ * this callback is called after the encoder is disabled via one of:
+ *
+ * - &drm_encoder_helper_funcs.atomic_disable
+ * - &drm_encoder_helper_funcs.prepare
+ * - &drm_encoder_helper_funcs.disable
+ * - &drm_encoder_helper_funcs.dpms
+ *
+ * and the CRTC is disabled via one of:
+ *
+ * - &drm_crtc_helper_funcs.prepare
+ * - &drm_crtc_helper_funcs.atomic_disable
+ * - &drm_crtc_helper_funcs.disable
+ * - &drm_crtc_helper_funcs.dpms
*
* The @atomic_post_disable callback is optional.
*/
@@ -532,7 +660,8 @@ struct drm_bridge_funcs {
*
* drm_connector_status indicating the bridge output status.
*/
- enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @get_modes:
@@ -689,8 +818,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*hdmi_audio_startup)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ int (*hdmi_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @hdmi_audio_prepare:
@@ -703,8 +832,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*hdmi_audio_prepare)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*hdmi_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
@@ -719,8 +848,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- void (*hdmi_audio_shutdown)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ void (*hdmi_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @hdmi_audio_mute_stream:
@@ -733,10 +862,20 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*hdmi_audio_mute_stream)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*hdmi_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction);
+ int (*hdmi_cec_init)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ int (*hdmi_cec_enable)(struct drm_bridge *bridge, bool enable);
+
+ int (*hdmi_cec_log_addr)(struct drm_bridge *bridge, u8 logical_addr);
+
+ int (*hdmi_cec_transmit)(struct drm_bridge *bridge, u8 attempts,
+ u32 signal_free_time, struct cec_msg *msg);
+
/**
* @dp_audio_startup:
*
@@ -748,8 +887,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*dp_audio_startup)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ int (*dp_audio_startup)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @dp_audio_prepare:
@@ -762,8 +901,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*dp_audio_prepare)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*dp_audio_prepare)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
struct hdmi_codec_daifmt *fmt,
struct hdmi_codec_params *hparms);
@@ -778,8 +917,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- void (*dp_audio_shutdown)(struct drm_connector *connector,
- struct drm_bridge *bridge);
+ void (*dp_audio_shutdown)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
/**
* @dp_audio_mute_stream:
@@ -792,8 +931,8 @@ struct drm_bridge_funcs {
* Returns:
* 0 on success, a negative error code otherwise
*/
- int (*dp_audio_mute_stream)(struct drm_connector *connector,
- struct drm_bridge *bridge,
+ int (*dp_audio_mute_stream)(struct drm_bridge *bridge,
+ struct drm_connector *connector,
bool enable, int direction);
/**
@@ -907,6 +1046,16 @@ enum drm_bridge_ops {
* flag.
*/
DRM_BRIDGE_OP_DP_AUDIO = BIT(6),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER: The bridge requires CEC notifier
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_NOTIFIER = BIT(7),
+ /**
+ * @DRM_BRIDGE_OP_HDMI_CEC_ADAPTER: The bridge requires CEC adapter
+ * to be present.
+ */
+ DRM_BRIDGE_OP_HDMI_CEC_ADAPTER = BIT(8),
};
/**
@@ -977,21 +1126,6 @@ struct drm_bridge {
* @ddc: Associated I2C adapter for DDC access, if any.
*/
struct i2c_adapter *ddc;
- /** private: */
- /**
- * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
- */
- struct mutex hpd_mutex;
- /**
- * @hpd_cb: Hot plug detection callback, registered with
- * drm_bridge_hpd_enable().
- */
- void (*hpd_cb)(void *data, enum drm_connector_status status);
- /**
- * @hpd_data: Private data passed to the Hot plug detection callback
- * @hpd_cb.
- */
- void *hpd_data;
/**
* @vendor: Vendor of the product to be used for the SPD InfoFrame
@@ -1019,6 +1153,12 @@ struct drm_bridge {
unsigned int max_bpc;
/**
+ * @hdmi_cec_dev: device to be used as a containing device for CEC
+ * functions.
+ */
+ struct device *hdmi_cec_dev;
+
+ /**
* @hdmi_audio_dev: device to be used as a parent for the HDMI Codec if
* either of @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO is set.
*/
@@ -1032,6 +1172,14 @@ struct drm_bridge {
int hdmi_audio_max_i2s_playback_channels;
/**
+ * @hdmi_audio_i2s_formats: supported I2S formats, optional. The
+ * default is to allow all formats supported by the corresponding I2S
+ * bus driver. This is only used for bridges setting
+ * @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
+ */
+ u64 hdmi_audio_i2s_formats;
+
+ /**
* @hdmi_audio_spdif_playback: set if this bridge has S/PDIF playback
* port for @DRM_BRIDGE_OP_HDMI_AUDIO or @DRM_BRIDGE_OP_DP_AUDIO.
*/
@@ -1043,6 +1191,32 @@ struct drm_bridge {
* not used.
*/
int hdmi_audio_dai_port;
+
+ /**
+ * @hdmi_cec_adapter_name: the name of the adapter to register
+ */
+ const char *hdmi_cec_adapter_name;
+
+ /**
+ * @hdmi_cec_available_las: number of logical addresses, CEC_MAX_LOG_ADDRS if unset
+ */
+ u8 hdmi_cec_available_las;
+
+ /** private: */
+ /**
+ * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
+ */
+ struct mutex hpd_mutex;
+ /**
+ * @hpd_cb: Hot plug detection callback, registered with
+ * drm_bridge_hpd_enable().
+ */
+ void (*hpd_cb)(void *data, enum drm_connector_status status);
+ /**
+ * @hpd_data: Private data passed to the Hot plug detection callback
+ * @hpd_cb.
+ */
+ void *hpd_data;
};
static inline struct drm_bridge *
@@ -1209,7 +1383,8 @@ drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
u32 output_fmt,
unsigned int *num_input_fmts);
-enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+enum drm_connector_status
+drm_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector);
int drm_bridge_get_modes(struct drm_bridge *bridge,
struct drm_connector *connector);
const struct drm_edid *drm_bridge_edid_read(struct drm_bridge *bridge,
@@ -1274,6 +1449,8 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void devm_drm_put_bridge(struct device *dev, struct drm_bridge *bridge);
+
void drm_bridge_debugfs_params(struct dentry *root);
void drm_bridge_debugfs_encoder_params(struct dentry *root, struct drm_encoder *encoder);
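Among the drm_bridge.h changes, @detect now receives the connector being probed. A sketch of a bridge driver adapted to the new signature (foo_* helpers are hypothetical):

static enum drm_connector_status
foo_bridge_detect(struct drm_bridge *bridge, struct drm_connector *connector)
{
        struct foo_bridge *foo = bridge_to_foo(bridge);

        return foo_hpd_asserted(foo) ? connector_status_connected
                                     : connector_status_disconnected;
}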
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index ed81741036d7..6cb577f6dba6 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -118,4 +118,31 @@ enum drm_color_lut_tests {
};
int drm_color_lut_check(const struct drm_property_blob *lut, u32 tests);
+
+/*
+ * Gamma-LUT programming
+ */
+
+typedef void (*drm_crtc_set_lut_func)(struct drm_crtc *, unsigned int, u16, u16, u16);
+
+void drm_crtc_load_gamma_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_565_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+void drm_crtc_load_gamma_555_from_888(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_gamma);
+
+void drm_crtc_fill_gamma_888(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_565(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+void drm_crtc_fill_gamma_555(struct drm_crtc *crtc, drm_crtc_set_lut_func set_gamma);
+
+/*
+ * Color-LUT programming
+ */
+
+void drm_crtc_load_palette_8(struct drm_crtc *crtc, const struct drm_color_lut *lut,
+ drm_crtc_set_lut_func set_palette);
+
+void drm_crtc_fill_palette_8(struct drm_crtc *crtc, drm_crtc_set_lut_func set_palette);
+
#endif
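The new gamma helpers take a driver callback matching drm_crtc_set_lut_func and iterate the LUT for it. A hypothetical driver sketch:

static void foo_set_gamma(struct drm_crtc *crtc, unsigned int index,
                          u16 red, u16 green, u16 blue)
{
        /* write one hardware palette entry at the given index */
}

static void foo_load_gamma(struct drm_crtc *crtc,
                           const struct drm_color_lut *lut)
{
        if (lut)
                drm_crtc_load_gamma_888(crtc, lut, foo_set_gamma);
        else
                drm_crtc_fill_gamma_888(crtc, foo_set_gamma);
}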
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index f13d597370a3..8f34f4b8183d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -800,6 +800,11 @@ struct drm_display_info {
struct drm_hdmi_info hdmi;
/**
+ * @hdr_sink_metadata: HDR Metadata Information read from sink
+ */
+ struct hdr_sink_metadata hdr_sink_metadata;
+
+ /**
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
@@ -843,7 +848,9 @@ struct drm_display_info {
int vics_len;
/**
- * @quirks: EDID based quirks. Internal to EDID parsing.
+ * @quirks: EDID based quirks. DRM core and drivers can query the
+ * @drm_edid_quirk quirks using drm_edid_has_quirk(), the rest of
+ * the quirks also tracked here are internal to EDID parsing.
*/
u32 quirks;
@@ -1191,6 +1198,29 @@ struct drm_connector_hdmi_audio_funcs {
bool enable, int direction);
};
+void drm_connector_cec_phys_addr_invalidate(struct drm_connector *connector);
+void drm_connector_cec_phys_addr_set(struct drm_connector *connector);
+
+/**
+ * struct drm_connector_cec_funcs - drm_hdmi_connector control functions
+ */
+struct drm_connector_cec_funcs {
+ /**
+ * @phys_addr_invalidate: mark CEC physical address as invalid
+ *
+ * The callback to mark CEC physical address as invalid, abstracting
+ * the operation.
+ */
+ void (*phys_addr_invalidate)(struct drm_connector *connector);
+
+ /**
+ * @phys_addr_set: set CEC physical address
+ *
+ * The callback to set CEC physical address, abstracting the operation.
+ */
+ void (*phys_addr_set)(struct drm_connector *connector, u16 addr);
+};
+
/**
* struct drm_connector_hdmi_funcs - drm_hdmi_connector control functions
*/
@@ -1833,6 +1863,26 @@ struct drm_connector_hdmi {
};
/**
+ * struct drm_connector_cec - DRM Connector CEC-related structure
+ */
+struct drm_connector_cec {
+ /**
+ * @mutex: protects all fields in this structure.
+ */
+ struct mutex mutex;
+
+ /**
+ * @funcs: CEC Control Functions
+ */
+ const struct drm_connector_cec_funcs *funcs;
+
+ /**
+ * @data: CEC implementation-specific data
+ */
+ void *data;
+};
+
+/**
* struct drm_connector - central DRM connector control structure
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
@@ -2241,9 +2291,6 @@ struct drm_connector {
*/
struct llist_node free_node;
- /** @hdr_sink_metadata: HDR Metadata Information read from sink */
- struct hdr_sink_metadata hdr_sink_metadata;
-
/**
* @hdmi: HDMI-related variable and properties.
*/
@@ -2253,6 +2300,11 @@ struct drm_connector {
* @hdmi_audio: HDMI codec properties and non-DRM state.
*/
struct drm_connector_hdmi_audio hdmi_audio;
+
+ /**
+ * @cec: CEC-related data.
+ */
+ struct drm_connector_cec cec;
};
#define obj_to_connector(x) container_of(x, struct drm_connector, base)
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index cf06cee4343f..ea8cba94208a 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -153,6 +153,9 @@ void drm_debugfs_add_files(struct drm_device *dev,
int drm_debugfs_gpuva_info(struct seq_file *m,
struct drm_gpuvm *gpuvm);
+
+void drm_debugfs_clients_add(struct drm_file *file);
+void drm_debugfs_clients_remove(struct drm_file *file);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -181,6 +184,14 @@ static inline int drm_debugfs_gpuva_info(struct seq_file *m,
{
return 0;
}
+
+static inline void drm_debugfs_clients_add(struct drm_file *file)
+{
+}
+
+static inline void drm_debugfs_clients_remove(struct drm_file *file)
+{
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index e2f894f1b90a..a33aedd5e9ec 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -5,6 +5,7 @@
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/idr.h>
+#include <linux/sched.h>
#include <drm/drm_mode_config.h>
@@ -31,6 +32,16 @@ struct pci_controller;
#define DRM_WEDGE_RECOVERY_BUS_RESET BIT(2) /* unbind + reset bus device + bind */
/**
+ * struct drm_wedge_task_info - information about the guilty task of a wedge dev
+ */
+struct drm_wedge_task_info {
+ /** @pid: pid of the task */
+ pid_t pid;
+ /** @comm: command name of the task */
+ char comm[TASK_COMM_LEN];
+};
+
+/**
* enum switch_power_state - power state of drm device
*/
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 63b51942d606..42fc085f986d 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -487,7 +487,8 @@ void drm_put_dev(struct drm_device *dev);
bool drm_dev_enter(struct drm_device *dev, int *idx);
void drm_dev_exit(int idx);
void drm_dev_unplug(struct drm_device *dev);
-int drm_dev_wedged_event(struct drm_device *dev, unsigned long method);
+int drm_dev_wedged_event(struct drm_device *dev, unsigned long method,
+ struct drm_wedge_task_info *info);
/**
* drm_dev_is_unplugged - is a DRM device unplugged
@@ -571,9 +572,24 @@ static inline bool drm_firmware_drivers_only(void)
}
#if defined(CONFIG_DEBUG_FS)
-void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root);
+void drm_debugfs_dev_init(struct drm_device *dev);
+void drm_debugfs_init_root(void);
+void drm_debugfs_remove_root(void);
+void drm_debugfs_bridge_params(void);
#else
-static inline void drm_debugfs_dev_init(struct drm_device *dev, struct dentry *root)
+static inline void drm_debugfs_dev_init(struct drm_device *dev)
+{
+}
+
+static inline void drm_debugfs_init_root(void)
+{
+}
+
+static inline void drm_debugfs_remove_root(void)
+{
+}
+
+static inline void drm_debugfs_bridge_params(void)
{
}
#endif
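drm_dev_wedged_event() now takes an optional description of the guilty task. A sketch of a reset handler filling it in (the handler context and task pointer are hypothetical):

struct drm_wedge_task_info info = {
        .pid = task_pid_nr(task),
};

get_task_comm(info.comm, task);
drm_dev_wedged_event(dev, DRM_WEDGE_RECOVERY_BUS_RESET, &info);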
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b38409670868..3d1aecfec9b2 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -109,6 +109,13 @@ struct detailed_data_string {
#define DRM_EDID_CVT_FLAGS_STANDARD_BLANKING (1 << 3)
#define DRM_EDID_CVT_FLAGS_REDUCED_BLANKING (1 << 4)
+enum drm_edid_quirk {
+ /* Do a dummy read before DPCD accesses, to prevent corruption. */
+ DRM_EDID_QUIRK_DP_DPCD_PROBE,
+
+ DRM_EDID_QUIRK_NUM,
+};
+
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
@@ -476,5 +483,6 @@ void drm_edid_print_product_id(struct drm_printer *p,
u32 drm_edid_get_panel_id(const struct drm_edid *drm_edid);
bool drm_edid_match(const struct drm_edid *drm_edid,
const struct drm_edid_ident *ident);
+bool drm_edid_has_quirk(struct drm_connector *connector, enum drm_edid_quirk quirk);
#endif /* __DRM_EDID_H__ */
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index d344d41e6cfe..115763799625 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -403,6 +403,13 @@ struct drm_file {
* @client_name_lock: Protects @client_name.
*/
struct mutex client_name_lock;
+
+ /**
+ * @debugfs_client:
+ *
+ * debugfs directory for each client under a drm node.
+ */
+ struct dentry *debugfs_client;
};
/**
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index d8539174ca11..562bc383ece4 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -82,8 +82,10 @@ void drm_fb_xrgb8888_to_rgb332(struct iosys_map *dst, const unsigned int *dst_pi
const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_rgb565(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
- const struct drm_rect *clip, struct drm_format_conv_state *state,
- bool swab);
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_rgb565be(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb1555(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
@@ -102,6 +104,15 @@ void drm_fb_xrgb8888_to_bgr888(struct iosys_map *dst, const unsigned int *dst_pi
void drm_fb_xrgb8888_to_argb8888(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_abgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_xbgr8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
+void drm_fb_xrgb8888_to_bgrx8888(struct iosys_map *dst, const unsigned int *dst_pitch,
+ const struct iosys_map *src, const struct drm_framebuffer *fb,
+ const struct drm_rect *clip, struct drm_format_conv_state *state);
void drm_fb_xrgb8888_to_xrgb2101010(struct iosys_map *dst, const unsigned int *dst_pitch,
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip,
@@ -125,8 +136,4 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc
const struct iosys_map *src, const struct drm_framebuffer *fb,
const struct drm_rect *clip, struct drm_format_conv_state *state);
-size_t drm_fb_build_fourcc_list(struct drm_device *dev,
- const u32 *native_fourccs, size_t native_nfourccs,
- u32 *fourccs_out, size_t nfourccs_out);
-
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
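
A minimal sketch of a caller updated for the rgb565 split (my_update_rect() and passing NULL for the default dst_pitch are illustrative assumptions): the old 'swab' bool is gone, and byte-swapped output now has its own helper.

	static void my_update_rect(struct iosys_map *dst, const struct iosys_map *src,
				   const struct drm_framebuffer *fb,
				   const struct drm_rect *clip,
				   struct drm_format_conv_state *state, bool be)
	{
		if (be)	/* byte-swapped (big-endian) RGB565 output */
			drm_fb_xrgb8888_to_rgb565be(dst, NULL, src, fb, clip, state);
		else
			drm_fb_xrgb8888_to_rgb565(dst, NULL, src, fb, clip, state);
	}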
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index c3f4405d6662..471784426857 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -54,7 +54,6 @@
#endif
struct drm_device;
-struct drm_mode_fb_cmd2;
/**
* struct drm_format_info - information about a DRM format
@@ -309,7 +308,7 @@ const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
const struct drm_format_info *
drm_get_format_info(struct drm_device *dev,
- const struct drm_mode_fb_cmd2 *mode_cmd);
+ u32 pixel_format, u64 modifier);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
uint32_t drm_driver_legacy_fb_format(struct drm_device *dev,
uint32_t bpp, uint32_t depth);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index a3133a08267c..d3a7b43e2c63 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -126,7 +126,8 @@ struct drm_gem_object_funcs {
/**
* @pin:
*
- * Pin backing buffer in memory. Used by the drm_gem_map_attach() helper.
+ * Pin backing buffer in memory, such that dma-buf importers can
+ * access it. Used by the drm_gem_map_attach() helper.
*
* This callback is optional.
*/
@@ -559,10 +560,12 @@ void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock);
void drm_gem_lru_remove(struct drm_gem_object *obj);
void drm_gem_lru_move_tail_locked(struct drm_gem_lru *lru, struct drm_gem_object *obj);
void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj);
-unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
- unsigned int nr_to_scan,
- unsigned long *remaining,
- bool (*shrink)(struct drm_gem_object *obj));
+unsigned long
+drm_gem_lru_scan(struct drm_gem_lru *lru,
+ unsigned int nr_to_scan,
+ unsigned long *remaining,
+ bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket),
+ struct ww_acquire_ctx *ticket);
int drm_gem_evict_locked(struct drm_gem_object *obj);
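
A sketch of a shrinker using the new ticket-aware callback (my_purge() is a hypothetical driver helper); the ww_acquire_ctx is threaded through drm_gem_lru_scan() into each shrink attempt:

	static bool my_shrink(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket)
	{
		return my_purge(obj, ticket);	/* hypothetical driver helper */
	}

	static unsigned long my_scan(struct drm_gem_lru *lru, unsigned int nr_to_scan)
	{
		unsigned long freed, remaining = 0;
		struct ww_acquire_ctx ticket;

		ww_acquire_init(&ticket, &reservation_ww_class);
		freed = drm_gem_lru_scan(lru, nr_to_scan, &remaining, my_shrink, &ticket);
		ww_acquire_fini(&ticket);

		return freed;
	}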
diff --git a/include/drm/drm_gem_framebuffer_helper.h b/include/drm/drm_gem_framebuffer_helper.h
index d302521f3dd4..24f1fd40d553 100644
--- a/include/drm/drm_gem_framebuffer_helper.h
+++ b/include/drm/drm_gem_framebuffer_helper.h
@@ -8,6 +8,7 @@ struct drm_afbc_framebuffer;
struct drm_device;
struct drm_fb_helper_surface_size;
struct drm_file;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_framebuffer_funcs;
struct drm_gem_object;
@@ -24,17 +25,21 @@ int drm_gem_fb_create_handle(struct drm_framebuffer *fb, struct drm_file *file,
int drm_gem_fb_init_with_funcs(struct drm_device *dev,
struct drm_framebuffer *fb,
struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create_with_funcs(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
const struct drm_framebuffer_funcs *funcs);
struct drm_framebuffer *
drm_gem_fb_create(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
struct drm_framebuffer *
drm_gem_fb_create_with_dirty(struct drm_device *dev, struct drm_file *file,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map,
@@ -47,6 +52,7 @@ void drm_gem_fb_end_cpu_access(struct drm_framebuffer *fb, enum dma_data_directi
(((modifier) & AFBC_VENDOR_AND_TYPE_MASK) == DRM_FORMAT_MOD_ARM_AFBC(0))
int drm_gem_fb_afbc_init(struct drm_device *dev,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd,
struct drm_afbc_framebuffer *afbc_fb);
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index b4f993da3cae..92f5db84b9c2 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -287,15 +287,18 @@ drm_gem_shmem_prime_import_sg_table(struct drm_device *dev,
struct sg_table *sgt);
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
+struct drm_gem_object *drm_gem_shmem_prime_import_no_map(struct drm_device *dev,
+ struct dma_buf *buf);
/**
* DRM_GEM_SHMEM_DRIVER_OPS - Default shmem GEM operations
*
- * This macro provides a shortcut for setting the shmem GEM operations in
- * the &drm_driver structure.
+ * This macro provides a shortcut for setting the shmem GEM operations
+ * in the &drm_driver structure. Drivers that do not require an s/g table
+ * for imported buffers should use this.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .gem_prime_import = drm_gem_shmem_prime_import_no_map, \
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
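
For illustration, a driver picking up the changed macro looks unchanged at the usage site; only the wiring behind DRM_GEM_SHMEM_DRIVER_OPS moves from .gem_prime_import_sg_table to .gem_prime_import (the fields shown are a minimal, assumed subset):

	static const struct drm_driver my_driver = {
		.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		DRM_GEM_SHMEM_DRIVER_OPS,	/* now sets .gem_prime_import */
		/* ... name, fops, etc. ... */
	};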
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 00830b49a3ff..2dd42bed679d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -94,8 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
-int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
-int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
int drm_gem_vram_vmap(struct drm_gem_vram_object *gbo, struct iosys_map *map);
void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo,
struct iosys_map *map);
diff --git a/include/drm/drm_gpusvm.h b/include/drm/drm_gpusvm.h
index eaf704d3d05e..4aedc5423aff 100644
--- a/include/drm/drm_gpusvm.h
+++ b/include/drm/drm_gpusvm.h
@@ -16,92 +16,10 @@ struct drm_gpusvm;
struct drm_gpusvm_notifier;
struct drm_gpusvm_ops;
struct drm_gpusvm_range;
-struct drm_gpusvm_devmem;
struct drm_pagemap;
struct drm_pagemap_device_addr;
/**
- * struct drm_gpusvm_devmem_ops - Operations structure for GPU SVM device memory
- *
- * This structure defines the operations for GPU Shared Virtual Memory (SVM)
- * device memory. These operations are provided by the GPU driver to manage device memory
- * allocations and perform operations such as migration between device memory and system
- * RAM.
- */
-struct drm_gpusvm_devmem_ops {
- /**
- * @devmem_release: Release device memory allocation (optional)
- * @devmem_allocation: device memory allocation
- *
- * Release device memory allocation and drop a reference to device
- * memory allocation.
- */
- void (*devmem_release)(struct drm_gpusvm_devmem *devmem_allocation);
-
- /**
- * @populate_devmem_pfn: Populate device memory PFN (required for migration)
- * @devmem_allocation: device memory allocation
- * @npages: Number of pages to populate
- * @pfn: Array of page frame numbers to populate
- *
- * Populate device memory page frame numbers (PFN).
- *
- * Return: 0 on success, a negative error code on failure.
- */
- int (*populate_devmem_pfn)(struct drm_gpusvm_devmem *devmem_allocation,
- unsigned long npages, unsigned long *pfn);
-
- /**
- * @copy_to_devmem: Copy to device memory (required for migration)
- * @pages: Pointer to array of device memory pages (destination)
- * @dma_addr: Pointer to array of DMA addresses (source)
- * @npages: Number of pages to copy
- *
- * Copy pages to device memory.
- *
- * Return: 0 on success, a negative error code on failure.
- */
- int (*copy_to_devmem)(struct page **pages,
- dma_addr_t *dma_addr,
- unsigned long npages);
-
- /**
- * @copy_to_ram: Copy to system RAM (required for migration)
- * @pages: Pointer to array of device memory pages (source)
- * @dma_addr: Pointer to array of DMA addresses (destination)
- * @npages: Number of pages to copy
- *
- * Copy pages to system RAM.
- *
- * Return: 0 on success, a negative error code on failure.
- */
- int (*copy_to_ram)(struct page **pages,
- dma_addr_t *dma_addr,
- unsigned long npages);
-};
-
-/**
- * struct drm_gpusvm_devmem - Structure representing a GPU SVM device memory allocation
- *
- * @dev: Pointer to the device structure which device memory allocation belongs to
- * @mm: Pointer to the mm_struct for the address space
- * @detached: device memory allocations is detached from device pages
- * @ops: Pointer to the operations structure for GPU SVM device memory
- * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
- * @size: Size of device memory allocation
- * @timeslice_expiration: Timeslice expiration in jiffies
- */
-struct drm_gpusvm_devmem {
- struct device *dev;
- struct mm_struct *mm;
- struct completion detached;
- const struct drm_gpusvm_devmem_ops *ops;
- struct drm_pagemap *dpagemap;
- size_t size;
- u64 timeslice_expiration;
-};
-
-/**
* struct drm_gpusvm_ops - Operations structure for GPU SVM
*
* This structure defines the operations for GPU Shared Virtual Memory (SVM).
@@ -327,6 +245,11 @@ void drm_gpusvm_fini(struct drm_gpusvm *gpusvm);
void drm_gpusvm_free(struct drm_gpusvm *gpusvm);
+unsigned long
+drm_gpusvm_find_vma_start(struct drm_gpusvm *gpusvm,
+ unsigned long start,
+ unsigned long end);
+
struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
unsigned long fault_addr,
@@ -356,15 +279,6 @@ void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
struct drm_gpusvm_range *range,
const struct drm_gpusvm_ctx *ctx);
-int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
- struct drm_gpusvm_range *range,
- struct drm_gpusvm_devmem *devmem_allocation,
- const struct drm_gpusvm_ctx *ctx);
-
-int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation);
-
-const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void);
-
bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
unsigned long end);
@@ -375,11 +289,6 @@ drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
const struct mmu_notifier_range *mmu_range);
-void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
- struct device *dev, struct mm_struct *mm,
- const struct drm_gpusvm_devmem_ops *ops,
- struct drm_pagemap *dpagemap, size_t size);
-
#ifdef CONFIG_LOCKDEP
/**
* drm_gpusvm_driver_set_lock() - Set the lock protecting accesses to GPU SVM
diff --git a/include/drm/drm_gpuvm.h b/include/drm/drm_gpuvm.h
index 2a9629377633..274532facfd6 100644
--- a/include/drm/drm_gpuvm.h
+++ b/include/drm/drm_gpuvm.h
@@ -1211,6 +1211,14 @@ int drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv,
int drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv,
u64 addr, u64 range);
+int drm_gpuvm_sm_map_exec_lock(struct drm_gpuvm *gpuvm,
+ struct drm_exec *exec, unsigned int num_fences,
+ u64 req_addr, u64 req_range,
+ struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuvm_sm_unmap_exec_lock(struct drm_gpuvm *gpuvm, struct drm_exec *exec,
+ u64 req_addr, u64 req_range);
+
void drm_gpuva_map(struct drm_gpuvm *gpuvm,
struct drm_gpuva *va,
struct drm_gpuva_op_map *op);
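
A hedged sketch of how the new _exec_lock variant might sit inside a drm_exec transaction (variable names are illustrative; the actual locking policy is driver-specific):

	struct drm_exec exec;
	int err;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		err = drm_gpuvm_sm_map_exec_lock(gpuvm, &exec, 1 /* num_fences */,
						 req_addr, req_range, obj, offset);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}
	/* ... perform the map while the involved GEM objects stay locked ... */
	drm_exec_fini(&exec);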
diff --git a/include/drm/drm_managed.h b/include/drm/drm_managed.h
index 53017cc609ac..72bfac002c06 100644
--- a/include/drm/drm_managed.h
+++ b/include/drm/drm_managed.h
@@ -129,14 +129,25 @@ void __drmm_mutex_release(struct drm_device *dev, void *res);
void __drmm_workqueue_release(struct drm_device *device, void *wq);
+/**
+ * drmm_alloc_ordered_workqueue - &drm_device managed alloc_ordered_workqueue()
+ * @dev: DRM device
+ * @fmt: printf format for the name of the workqueue
+ * @flags: WQ_* flags (only WQ_FREEZABLE and WQ_MEM_RECLAIM are meaningful)
+ * @args: args for @fmt
+ *
+ * This is a &drm_device-managed version of alloc_ordered_workqueue(). The
+ * allocated workqueue is automatically destroyed on the final drm_dev_put().
+ *
+ * Returns: workqueue on success, negative ERR_PTR otherwise.
+ */
#define drmm_alloc_ordered_workqueue(dev, fmt, flags, args...) \
({ \
struct workqueue_struct *wq = alloc_ordered_workqueue(fmt, flags, ##args); \
wq ? ({ \
int ret = drmm_add_action_or_reset(dev, __drmm_workqueue_release, wq); \
ret ? ERR_PTR(ret) : wq; \
- }) : \
- wq; \
+ }) : ERR_PTR(-ENOMEM); \
})
#endif
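
With the fix above, allocation failure yields ERR_PTR(-ENOMEM) instead of NULL, so callers only need the IS_ERR() check (sketch; "my-wq" is an arbitrary name):

	struct workqueue_struct *wq;

	wq = drmm_alloc_ordered_workqueue(drm, "my-wq", WQ_MEM_RECLAIM);
	if (IS_ERR(wq))
		return PTR_ERR(wq);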
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 6d2c08e81101..57a869a6f6e8 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -130,8 +130,6 @@ struct mipi_dsi_host *of_find_mipi_dsi_host_by_node(struct device_node *node);
#define MIPI_DSI_MODE_VIDEO_NO_HBP BIT(6)
/* disable hsync-active area */
#define MIPI_DSI_MODE_VIDEO_NO_HSA BIT(7)
-/* flush display FIFO on vsync pulse */
-#define MIPI_DSI_MODE_VSYNC_FLUSH BIT(8)
/* disable EoT packets in HS mode */
#define MIPI_DSI_MODE_NO_EOT_PACKET BIT(9)
/* device supports non-continuous clock behavior (DSI spec 5.6.1) */
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 9e524b51a001..2e848b816218 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -82,6 +82,7 @@ struct drm_mode_config_funcs {
*/
struct drm_framebuffer *(*fb_create)(struct drm_device *dev,
struct drm_file *file_priv,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
/**
@@ -95,7 +96,7 @@ struct drm_mode_config_funcs {
* The format information specific to the given fb metadata, or
* NULL if none is found.
*/
- const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
+ const struct drm_format_info *(*get_format_info)(u32 pixel_format, u64 modifier);
/**
* @mode_valid:
diff --git a/include/drm/drm_modeset_helper.h b/include/drm/drm_modeset_helper.h
index 995fd981cab0..7e3d4c5a7f66 100644
--- a/include/drm/drm_modeset_helper.h
+++ b/include/drm/drm_modeset_helper.h
@@ -26,6 +26,7 @@
struct drm_crtc;
struct drm_crtc_funcs;
struct drm_device;
+struct drm_format_info;
struct drm_framebuffer;
struct drm_mode_fb_cmd2;
@@ -33,6 +34,7 @@ void drm_helper_move_panel_connectors_to_head(struct drm_device *);
void drm_helper_mode_fill_fb_struct(struct drm_device *dev,
struct drm_framebuffer *fb,
+ const struct drm_format_info *info,
const struct drm_mode_fb_cmd2 *mode_cmd);
int drm_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
diff --git a/include/drm/drm_pagemap.h b/include/drm/drm_pagemap.h
index 202c157ff4d7..e5f20a1235be 100644
--- a/include/drm/drm_pagemap.h
+++ b/include/drm/drm_pagemap.h
@@ -7,6 +7,7 @@
#include <linux/types.h>
struct drm_pagemap;
+struct drm_pagemap_zdd;
struct device;
/**
@@ -91,6 +92,35 @@ struct drm_pagemap_ops {
struct device *dev,
struct drm_pagemap_device_addr addr);
+ /**
+ * @populate_mm: Populate part of the mm with @dpagemap memory,
+ * migrating existing data.
+ * @dpagemap: The struct drm_pagemap managing the memory.
+ * @start: The virtual start address in @mm
+ * @end: The virtual end address in @mm
+ * @mm: Pointer to a live mm. The caller must have an mmget()
+	 * reference.
+	 * @timeslice_ms: Requested time, in milliseconds, for the migrated
+	 * memory to remain present in @mm before migration back is allowed.
+	 *
+	 * The caller will hold the mm lock at least in read mode.
+	 * Note that there is no guarantee that the memory is resident
+	 * after the function returns; this is best effort only.
+	 * When the mm no longer uses the memory, it will be released.
+	 * The struct drm_pagemap might have a mechanism in place to
+	 * reclaim the memory, in which case the data will be migrated,
+	 * typically back to system memory.
+	 * The implementation should hold sufficient runtime power
+	 * references while pages are used in an address space and
+	 * should ideally guard against hardware device unbind in
+	 * such a way that device pages are migrated back to system
+	 * memory before the device pages are removed. The implementation
+	 * should return -ENODEV after device removal.
+ *
+ * Return: 0 if successful. Negative error code on error.
+ */
+ int (*populate_mm)(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
};
/**
@@ -104,4 +134,109 @@ struct drm_pagemap {
struct device *dev;
};
+struct drm_pagemap_devmem;
+
+/**
+ * struct drm_pagemap_devmem_ops - Operations structure for GPU SVM device memory
+ *
+ * This structure defines the operations for GPU Shared Virtual Memory (SVM)
+ * device memory. These operations are provided by the GPU driver to manage device memory
+ * allocations and perform operations such as migration between device memory and system
+ * RAM.
+ */
+struct drm_pagemap_devmem_ops {
+ /**
+ * @devmem_release: Release device memory allocation (optional)
+ * @devmem_allocation: device memory allocation
+ *
+ * Release device memory allocation and drop a reference to device
+ * memory allocation.
+ */
+ void (*devmem_release)(struct drm_pagemap_devmem *devmem_allocation);
+
+ /**
+ * @populate_devmem_pfn: Populate device memory PFN (required for migration)
+ * @devmem_allocation: device memory allocation
+ * @npages: Number of pages to populate
+ * @pfn: Array of page frame numbers to populate
+ *
+ * Populate device memory page frame numbers (PFN).
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*populate_devmem_pfn)(struct drm_pagemap_devmem *devmem_allocation,
+ unsigned long npages, unsigned long *pfn);
+
+ /**
+ * @copy_to_devmem: Copy to device memory (required for migration)
+ * @pages: Pointer to array of device memory pages (destination)
+ * @dma_addr: Pointer to array of DMA addresses (source)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to device memory.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_devmem)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+
+ /**
+ * @copy_to_ram: Copy to system RAM (required for migration)
+ * @pages: Pointer to array of device memory pages (source)
+ * @dma_addr: Pointer to array of DMA addresses (destination)
+ * @npages: Number of pages to copy
+ *
+ * Copy pages to system RAM.
+ *
+ * Return: 0 on success, a negative error code on failure.
+ */
+ int (*copy_to_ram)(struct page **pages,
+ dma_addr_t *dma_addr,
+ unsigned long npages);
+};
+
+/**
+ * struct drm_pagemap_devmem - Structure representing a GPU SVM device memory allocation
+ *
+ * @dev: Pointer to the device structure to which the device memory allocation belongs
+ * @mm: Pointer to the mm_struct for the address space
+ * @detached: Completion signaled once the device memory allocation is detached from device pages
+ * @ops: Pointer to the operations structure for GPU SVM device memory
+ * @dpagemap: The struct drm_pagemap of the pages this allocation belongs to.
+ * @size: Size of device memory allocation
+ * @timeslice_expiration: Timeslice expiration in jiffies
+ */
+struct drm_pagemap_devmem {
+ struct device *dev;
+ struct mm_struct *mm;
+ struct completion detached;
+ const struct drm_pagemap_devmem_ops *ops;
+ struct drm_pagemap *dpagemap;
+ size_t size;
+ u64 timeslice_expiration;
+};
+
+int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
+ struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned long timeslice_ms,
+ void *pgmap_owner);
+
+int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation);
+
+const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void);
+
+struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page);
+
+void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
+ struct device *dev, struct mm_struct *mm,
+ const struct drm_pagemap_devmem_ops *ops,
+ struct drm_pagemap *dpagemap, size_t size);
+
+int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
+ unsigned long start, unsigned long end,
+ struct mm_struct *mm,
+ unsigned long timeslice_ms);
+
#endif
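
A skeleton of the driver-provided ops that moved here from drm_gpusvm.h (the my_* callbacks are hypothetical; only the hooks required for migration plus the optional release hook are shown):

	static const struct drm_pagemap_devmem_ops my_devmem_ops = {
		.devmem_release = my_devmem_release,		/* optional */
		.populate_devmem_pfn = my_populate_devmem_pfn,
		.copy_to_devmem = my_copy_to_devmem,
		.copy_to_ram = my_copy_to_ram,
	};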
diff --git a/include/drm/drm_panic.h b/include/drm/drm_panic.h
index 310c88c4d336..ac0e46b73436 100644
--- a/include/drm/drm_panic.h
+++ b/include/drm/drm_panic.h
@@ -72,6 +72,12 @@ struct drm_scanout_buffer {
void (*set_pixel)(struct drm_scanout_buffer *sb, unsigned int x,
unsigned int y, u32 color);
+ /**
+	 * @private: private pointer that can be used by the set_pixel()
+	 * callback
+ */
+ void *private;
+
};
#ifdef CONFIG_DRM_PANIC
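
A sketch of the intended use of the new @private member (my_state and my_write_pixel() are hypothetical; the pointer would be stashed when the driver fills in the scanout buffer):

	static void my_set_pixel(struct drm_scanout_buffer *sb,
				 unsigned int x, unsigned int y, u32 color)
	{
		struct my_state *st = sb->private;	/* set by the driver earlier */

		my_write_pixel(st, x, y, color);	/* hypothetical helper */
	}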
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index fa085c44d4ca..f50f862f0d8b 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -100,6 +100,9 @@ struct dma_buf *drm_gem_prime_export(struct drm_gem_object *obj,
unsigned long drm_prime_get_contiguous_size(struct sg_table *sgt);
/* helper functions for importing */
+bool drm_gem_is_prime_exported_dma_buf(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+
struct drm_gem_object *drm_gem_prime_import_dev(struct drm_device *dev,
struct dma_buf *dma_buf,
struct device *attach_dev);
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 1a7e377d4cbb..323a505e6e6a 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -192,7 +192,7 @@ struct drm_sched_entity {
* @last_scheduled:
*
* Points to the finished fence of the last scheduled job. Only written
- * by the scheduler thread, can be accessed locklessly from
+ * by drm_sched_entity_pop_job(). Can be accessed locklessly from
* drm_sched_job_arm() if the queue is empty.
*/
struct dma_fence __rcu *last_scheduled;
@@ -305,6 +305,13 @@ struct drm_sched_fence {
* @owner: job owner for debugging
*/
void *owner;
+
+ /**
+ * @drm_client_id:
+ *
+ * The client_id of the drm_file which owns the job.
+ */
+ uint64_t drm_client_id;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
@@ -319,7 +326,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* @finish_cb: the callback for the finished fence.
* @credits: the number of credits this job contributes to the scheduler
* @work: Helper to reschedule job kill to different context.
- * @id: a unique id assigned to each job scheduled on the scheduler.
* @karma: increment on every hang caused by this job. If this exceeds the hang
* limit of the scheduler then the job is marked guilty and will not
* be scheduled further.
@@ -332,8 +338,6 @@ struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
* to schedule the job.
*/
struct drm_sched_job {
- u64 id;
-
/**
* @submit_ts:
*
@@ -387,13 +391,16 @@ struct drm_sched_job {
* enum drm_gpu_sched_stat - the scheduler's status
*
* @DRM_GPU_SCHED_STAT_NONE: Reserved. Do not use.
- * @DRM_GPU_SCHED_STAT_NOMINAL: Operation succeeded.
+ * @DRM_GPU_SCHED_STAT_RESET: The GPU hung and was successfully reset.
* @DRM_GPU_SCHED_STAT_ENODEV: Error: Device is not available anymore.
+ * @DRM_GPU_SCHED_STAT_NO_HANG: Contrary to the scheduler's assumption, the GPU
+ * did not hang and is still running.
*/
enum drm_gpu_sched_stat {
DRM_GPU_SCHED_STAT_NONE,
- DRM_GPU_SCHED_STAT_NOMINAL,
+ DRM_GPU_SCHED_STAT_RESET,
DRM_GPU_SCHED_STAT_ENODEV,
+ DRM_GPU_SCHED_STAT_NO_HANG,
};
/**
@@ -508,6 +515,24 @@ struct drm_sched_backend_ops {
* and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
+
+ /**
+ * @cancel_job: Used by the scheduler to guarantee remaining jobs' fences
+ * get signaled in drm_sched_fini().
+ *
+ * Used by the scheduler to cancel all jobs that have not been executed
+ * with &struct drm_sched_backend_ops.run_job by the time
+ * drm_sched_fini() gets invoked.
+ *
+ * Drivers need to signal the passed job's hardware fence with an
+ * appropriate error code (e.g., -ECANCELED) in this callback. They
+ * must not free the job.
+ *
+	 * The scheduler will only call this callback after it has permanently
+	 * stopped calling all other callbacks, with the exception of &struct
+	 * drm_sched_backend_ops.free_job.
+ */
+ void (*cancel_job)(struct drm_sched_job *sched_job);
};
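
A minimal cancel_job implementation consistent with the documentation above (to_my_job() and the hw_fence field are assumed driver details):

	static void my_cancel_job(struct drm_sched_job *sched_job)
	{
		struct my_job *job = to_my_job(sched_job);	/* hypothetical */

		dma_fence_set_error(job->hw_fence, -ECANCELED);
		dma_fence_signal(job->hw_fence);
	}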
/**
@@ -629,7 +654,8 @@ drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
- u32 credits, void *owner);
+ u32 credits, void *owner,
+ u64 drm_client_id);
void drm_sched_job_arm(struct drm_sched_job *job);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job);
int drm_sched_job_add_dependency(struct drm_sched_job *job,
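
Callers pass the owning drm_file's client id through the new parameter so it ends up in the scheduler fence; a fragment sketched under the assumption that the job is created on behalf of file_priv:

	err = drm_sched_job_init(&job->base, entity, 1 /* credits */,
				 file_priv, file_priv->client_id);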
diff --git a/include/drm/intel/pciids.h b/include/drm/intel/pciids.h
index a7ce9523c50d..76f8d26f9cc9 100644
--- a/include/drm/intel/pciids.h
+++ b/include/drm/intel/pciids.h
@@ -846,14 +846,18 @@
/* BMG */
#define INTEL_BMG_IDS(MACRO__, ...) \
MACRO__(0xE202, ## __VA_ARGS__), \
+ MACRO__(0xE209, ## __VA_ARGS__), \
MACRO__(0xE20B, ## __VA_ARGS__), \
MACRO__(0xE20C, ## __VA_ARGS__), \
MACRO__(0xE20D, ## __VA_ARGS__), \
MACRO__(0xE210, ## __VA_ARGS__), \
MACRO__(0xE211, ## __VA_ARGS__), \
MACRO__(0xE212, ## __VA_ARGS__), \
- MACRO__(0xE215, ## __VA_ARGS__), \
- MACRO__(0xE216, ## __VA_ARGS__)
+ MACRO__(0xE216, ## __VA_ARGS__), \
+ MACRO__(0xE220, ## __VA_ARGS__), \
+ MACRO__(0xE221, ## __VA_ARGS__), \
+ MACRO__(0xE222, ## __VA_ARGS__), \
+ MACRO__(0xE223, ## __VA_ARGS__)
/* PTL */
#define INTEL_PTL_IDS(MACRO__, ...) \
@@ -868,6 +872,8 @@
MACRO__(0xB08F, ## __VA_ARGS__), \
MACRO__(0xB090, ## __VA_ARGS__), \
MACRO__(0xB0A0, ## __VA_ARGS__), \
- MACRO__(0xB0B0, ## __VA_ARGS__)
+ MACRO__(0xB0B0, ## __VA_ARGS__), \
+ MACRO__(0xFD80, ## __VA_ARGS__), \
+ MACRO__(0xFD81, ## __VA_ARGS__)
#endif /* __PCIIDS_H__ */
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index cf027558b6db..479b7ed075c0 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -207,11 +207,9 @@ struct ttm_lru_walk_ops {
};
/**
- * struct ttm_lru_walk - Structure describing a LRU walk.
+ * struct ttm_lru_walk_arg - Common part for the variants of BO LRU walk.
*/
-struct ttm_lru_walk {
- /** @ops: Pointer to the ops structure. */
- const struct ttm_lru_walk_ops *ops;
+struct ttm_lru_walk_arg {
/** @ctx: Pointer to the struct ttm_operation_ctx. */
struct ttm_operation_ctx *ctx;
/** @ticket: The struct ww_acquire_ctx if any. */
@@ -220,6 +218,16 @@ struct ttm_lru_walk {
bool trylock_only;
};
+/**
+ * struct ttm_lru_walk - Structure describing a LRU walk.
+ */
+struct ttm_lru_walk {
+ /** @ops: Pointer to the ops structure. */
+ const struct ttm_lru_walk_ops *ops;
+ /** @arg: Common bo LRU walk arguments. */
+ struct ttm_lru_walk_arg arg;
+};
+
s64 ttm_lru_walk_for_evict(struct ttm_lru_walk *walk, struct ttm_device *bdev,
struct ttm_resource_manager *man, s64 target);
@@ -245,34 +253,6 @@ bool ttm_bo_shrink_suitable(struct ttm_buffer_object *bo, struct ttm_operation_c
bool ttm_bo_shrink_avoid_wait(void);
/**
- * ttm_bo_get - reference a struct ttm_buffer_object
- *
- * @bo: The buffer object.
- */
-static inline void ttm_bo_get(struct ttm_buffer_object *bo)
-{
- kref_get(&bo->kref);
-}
-
-/**
- * ttm_bo_get_unless_zero - reference a struct ttm_buffer_object unless
- * its refcount has already reached zero.
- * @bo: The buffer object.
- *
- * Used to reference a TTM buffer object in lookups where the object is removed
- * from the lookup structure during the destructor and for RCU lookups.
- *
- * Returns: @bo if the referencing was successful, NULL otherwise.
- */
-static inline __must_check struct ttm_buffer_object *
-ttm_bo_get_unless_zero(struct ttm_buffer_object *bo)
-{
- if (!kref_get_unless_zero(&bo->kref))
- return NULL;
- return bo;
-}
-
-/**
* ttm_bo_reserve:
*
* @bo: A pointer to a struct ttm_buffer_object.
@@ -429,6 +409,7 @@ int ttm_bo_init_validate(struct ttm_device *bdev, struct ttm_buffer_object *bo,
int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
unsigned long num_pages, struct ttm_bo_kmap_obj *map);
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
+void *ttm_bo_kmap_try_from_panic(struct ttm_buffer_object *bo, unsigned long page);
int ttm_bo_vmap(struct ttm_buffer_object *bo, struct iosys_map *map);
void ttm_bo_vunmap(struct ttm_buffer_object *bo, struct iosys_map *map);
int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
@@ -495,11 +476,6 @@ struct ttm_bo_lru_cursor {
/** @res_curs: Embedded struct ttm_resource_cursor. */
struct ttm_resource_cursor res_curs;
/**
- * @ctx: The struct ttm_operation_ctx used while looping.
- * governs the locking mode.
- */
- struct ttm_operation_ctx *ctx;
- /**
* @bo: Buffer object pointer if a buffer object is refcounted,
* NULL otherwise.
*/
@@ -509,6 +485,8 @@ struct ttm_bo_lru_cursor {
* unlock before the next iteration or after loop exit.
*/
bool needs_unlock;
+ /** @arg: Pointer to common BO LRU walk arguments. */
+ struct ttm_lru_walk_arg *arg;
};
void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
@@ -516,7 +494,7 @@ void ttm_bo_lru_cursor_fini(struct ttm_bo_lru_cursor *curs);
struct ttm_bo_lru_cursor *
ttm_bo_lru_cursor_init(struct ttm_bo_lru_cursor *curs,
struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx);
+ struct ttm_lru_walk_arg *arg);
struct ttm_buffer_object *ttm_bo_lru_cursor_first(struct ttm_bo_lru_cursor *curs);
@@ -527,9 +505,9 @@ struct ttm_buffer_object *ttm_bo_lru_cursor_next(struct ttm_bo_lru_cursor *curs)
*/
DEFINE_CLASS(ttm_bo_lru_cursor, struct ttm_bo_lru_cursor *,
if (_T) {ttm_bo_lru_cursor_fini(_T); },
- ttm_bo_lru_cursor_init(curs, man, ctx),
+ ttm_bo_lru_cursor_init(curs, man, arg),
struct ttm_bo_lru_cursor *curs, struct ttm_resource_manager *man,
- struct ttm_operation_ctx *ctx);
+ struct ttm_lru_walk_arg *arg);
static inline void *
class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
{ return *_T; }
@@ -540,7 +518,7 @@ class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
* resources on LRU lists.
* @_cursor: struct ttm_bo_lru_cursor to use for the iteration.
* @_man: The resource manager whose LRU lists to iterate over.
- * @_ctx: The struct ttm_operation_context to govern the @_bo locking.
+ * @_arg: The struct ttm_lru_walk_arg to govern the LRU walk.
* @_bo: The struct ttm_buffer_object pointer pointing to the buffer object
* for the current iteration.
*
@@ -552,10 +530,15 @@ class_ttm_bo_lru_cursor_lock_ptr(class_ttm_bo_lru_cursor_t *_T)
* up at looping termination, even if terminated prematurely by, for
* example a return or break statement. Exiting the loop will also unlock
* (if needed) and unreference @_bo.
+ *
+ * Return: If locking of a bo returns an error, then iteration is terminated
+ * and @_bo is set to a corresponding error pointer. It's illegal to
+ * dereference @_bo after loop exit.
*/
-#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _ctx, _bo) \
- scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _ctx) \
- for ((_bo) = ttm_bo_lru_cursor_first(_cursor); (_bo); \
- (_bo) = ttm_bo_lru_cursor_next(_cursor))
+#define ttm_bo_lru_for_each_reserved_guarded(_cursor, _man, _arg, _bo) \
+ scoped_guard(ttm_bo_lru_cursor, _cursor, _man, _arg) \
+ for ((_bo) = ttm_bo_lru_cursor_first(_cursor); \
+ !IS_ERR_OR_NULL(_bo); \
+ (_bo) = ttm_bo_lru_cursor_next(_cursor))
#endif
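
A sketch of the guarded loop after the switch to ttm_lru_walk_arg; note that the loop variable must now be checked with IS_ERR() after exit (the ctx and man setup is illustrative):

	struct ttm_operation_ctx ctx = { .interruptible = true };
	struct ttm_lru_walk_arg arg = { .ctx = &ctx, .trylock_only = true };
	struct ttm_bo_lru_cursor cursor;
	struct ttm_buffer_object *bo;

	ttm_bo_lru_for_each_reserved_guarded(&cursor, man, &arg, bo) {
		/* ... operate on the reserved buffer object ... */
	}
	if (IS_ERR(bo))
		return PTR_ERR(bo);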
diff --git a/include/drm/ttm/ttm_device.h b/include/drm/ttm/ttm_device.h
index 39b8636b1845..592b5f802859 100644
--- a/include/drm/ttm/ttm_device.h
+++ b/include/drm/ttm/ttm_device.h
@@ -272,6 +272,7 @@ struct ttm_device {
int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags);
int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
+int ttm_device_prepare_hibernation(struct ttm_device *bdev);
static inline struct ttm_resource_manager *
ttm_manager_type(struct ttm_device *bdev, int mem_type)
diff --git a/include/dt-bindings/clock/nxp,imx94-clock.h b/include/dt-bindings/clock/nxp,imx94-clock.h
new file mode 100644
index 000000000000..c4ba13352b99
--- /dev/null
+++ b/include/dt-bindings/clock/nxp,imx94-clock.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright 2025 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX94_H
+#define __DT_BINDINGS_CLOCK_IMX94_H
+
+#define IMX94_CLK_DISPMIX_CLK_SEL 0
+
+#define IMX94_CLK_DISPMIX_LVDS_CLK_GATE 0
+
+#endif /* __DT_BINDINGS_CLOCK_IMX94_H */
diff --git a/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
new file mode 100644
index 000000000000..586d1c9b33b3
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5018-cmn-pll.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5018_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5018_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5018. */
+#define IPQ5018_XO_24MHZ_CLK 1
+#define IPQ5018_SLEEP_32KHZ_CLK 2
+#define IPQ5018_ETH_50MHZ_CLK 3
+#endif
diff --git a/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
new file mode 100644
index 000000000000..f643c2668c04
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,ipq5424-cmn-pll.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2025 Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+#define _DT_BINDINGS_CLK_QCOM_IPQ5424_CMN_PLL_H
+
+/* CMN PLL core clock. */
+#define IPQ5424_CMN_PLL_CLK 0
+
+/* The output clocks from CMN PLL of IPQ5424. */
+#define IPQ5424_XO_24MHZ_CLK 1
+#define IPQ5424_SLEEP_32KHZ_CLK 2
+#define IPQ5424_PCS_31P25MHZ_CLK 3
+#define IPQ5424_NSS_300MHZ_CLK 4
+#define IPQ5424_PPE_375MHZ_CLK 5
+#define IPQ5424_ETH0_50MHZ_CLK 6
+#define IPQ5424_ETH1_50MHZ_CLK 7
+#define IPQ5424_ETH2_50MHZ_CLK 8
+#define IPQ5424_ETH_25MHZ_CLK 9
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-camcc.h b/include/dt-bindings/clock/qcom,milos-camcc.h
new file mode 100644
index 000000000000..21925dca9a20
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-camcc.h
@@ -0,0 +1,131 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_MILOS_H
+
+/* CAM_CC clocks */
+#define CAM_CC_PLL0 0
+#define CAM_CC_PLL0_OUT_EVEN 1
+#define CAM_CC_PLL0_OUT_ODD 2
+#define CAM_CC_PLL1 3
+#define CAM_CC_PLL1_OUT_EVEN 4
+#define CAM_CC_PLL2 5
+#define CAM_CC_PLL2_OUT_EVEN 6
+#define CAM_CC_PLL3 7
+#define CAM_CC_PLL3_OUT_EVEN 8
+#define CAM_CC_PLL4 9
+#define CAM_CC_PLL4_OUT_EVEN 10
+#define CAM_CC_PLL5 11
+#define CAM_CC_PLL5_OUT_EVEN 12
+#define CAM_CC_PLL6 13
+#define CAM_CC_PLL6_OUT_EVEN 14
+#define CAM_CC_BPS_AHB_CLK 15
+#define CAM_CC_BPS_AREG_CLK 16
+#define CAM_CC_BPS_CLK 17
+#define CAM_CC_BPS_CLK_SRC 18
+#define CAM_CC_CAMNOC_ATB_CLK 19
+#define CAM_CC_CAMNOC_AXI_CLK_SRC 20
+#define CAM_CC_CAMNOC_AXI_HF_CLK 21
+#define CAM_CC_CAMNOC_AXI_SF_CLK 22
+#define CAM_CC_CAMNOC_NRT_AXI_CLK 23
+#define CAM_CC_CAMNOC_RT_AXI_CLK 24
+#define CAM_CC_CCI_0_CLK 25
+#define CAM_CC_CCI_0_CLK_SRC 26
+#define CAM_CC_CCI_1_CLK 27
+#define CAM_CC_CCI_1_CLK_SRC 28
+#define CAM_CC_CORE_AHB_CLK 29
+#define CAM_CC_CPAS_AHB_CLK 30
+#define CAM_CC_CPHY_RX_CLK_SRC 31
+#define CAM_CC_CRE_AHB_CLK 32
+#define CAM_CC_CRE_CLK 33
+#define CAM_CC_CRE_CLK_SRC 34
+#define CAM_CC_CSI0PHYTIMER_CLK 35
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 36
+#define CAM_CC_CSI1PHYTIMER_CLK 37
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 38
+#define CAM_CC_CSI2PHYTIMER_CLK 39
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 40
+#define CAM_CC_CSI3PHYTIMER_CLK 41
+#define CAM_CC_CSI3PHYTIMER_CLK_SRC 42
+#define CAM_CC_CSIPHY0_CLK 43
+#define CAM_CC_CSIPHY1_CLK 44
+#define CAM_CC_CSIPHY2_CLK 45
+#define CAM_CC_CSIPHY3_CLK 46
+#define CAM_CC_FAST_AHB_CLK_SRC 47
+#define CAM_CC_GDSC_CLK 48
+#define CAM_CC_ICP_ATB_CLK 49
+#define CAM_CC_ICP_CLK 50
+#define CAM_CC_ICP_CLK_SRC 51
+#define CAM_CC_ICP_CTI_CLK 52
+#define CAM_CC_ICP_TS_CLK 53
+#define CAM_CC_MCLK0_CLK 54
+#define CAM_CC_MCLK0_CLK_SRC 55
+#define CAM_CC_MCLK1_CLK 56
+#define CAM_CC_MCLK1_CLK_SRC 57
+#define CAM_CC_MCLK2_CLK 58
+#define CAM_CC_MCLK2_CLK_SRC 59
+#define CAM_CC_MCLK3_CLK 60
+#define CAM_CC_MCLK3_CLK_SRC 61
+#define CAM_CC_MCLK4_CLK 62
+#define CAM_CC_MCLK4_CLK_SRC 63
+#define CAM_CC_OPE_0_AHB_CLK 64
+#define CAM_CC_OPE_0_AREG_CLK 65
+#define CAM_CC_OPE_0_CLK 66
+#define CAM_CC_OPE_0_CLK_SRC 67
+#define CAM_CC_SLEEP_CLK 68
+#define CAM_CC_SLEEP_CLK_SRC 69
+#define CAM_CC_SLOW_AHB_CLK_SRC 70
+#define CAM_CC_SOC_AHB_CLK 71
+#define CAM_CC_SYS_TMR_CLK 72
+#define CAM_CC_TFE_0_AHB_CLK 73
+#define CAM_CC_TFE_0_CLK 74
+#define CAM_CC_TFE_0_CLK_SRC 75
+#define CAM_CC_TFE_0_CPHY_RX_CLK 76
+#define CAM_CC_TFE_0_CSID_CLK 77
+#define CAM_CC_TFE_0_CSID_CLK_SRC 78
+#define CAM_CC_TFE_1_AHB_CLK 79
+#define CAM_CC_TFE_1_CLK 80
+#define CAM_CC_TFE_1_CLK_SRC 81
+#define CAM_CC_TFE_1_CPHY_RX_CLK 82
+#define CAM_CC_TFE_1_CSID_CLK 83
+#define CAM_CC_TFE_1_CSID_CLK_SRC 84
+#define CAM_CC_TFE_2_AHB_CLK 85
+#define CAM_CC_TFE_2_CLK 86
+#define CAM_CC_TFE_2_CLK_SRC 87
+#define CAM_CC_TFE_2_CPHY_RX_CLK 88
+#define CAM_CC_TFE_2_CSID_CLK 89
+#define CAM_CC_TFE_2_CSID_CLK_SRC 90
+#define CAM_CC_TOP_SHIFT_CLK 91
+#define CAM_CC_XO_CLK_SRC 92
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CAMSS_TOP_BCR 2
+#define CAM_CC_CCI_0_BCR 3
+#define CAM_CC_CCI_1_BCR 4
+#define CAM_CC_CPAS_BCR 5
+#define CAM_CC_CRE_BCR 6
+#define CAM_CC_CSI0PHY_BCR 7
+#define CAM_CC_CSI1PHY_BCR 8
+#define CAM_CC_CSI2PHY_BCR 9
+#define CAM_CC_CSI3PHY_BCR 10
+#define CAM_CC_ICP_BCR 11
+#define CAM_CC_MCLK0_BCR 12
+#define CAM_CC_MCLK1_BCR 13
+#define CAM_CC_MCLK2_BCR 14
+#define CAM_CC_MCLK3_BCR 15
+#define CAM_CC_MCLK4_BCR 16
+#define CAM_CC_OPE_0_BCR 17
+#define CAM_CC_TFE_0_BCR 18
+#define CAM_CC_TFE_1_BCR 19
+#define CAM_CC_TFE_2_BCR 20
+
+/* CAM_CC power domains */
+#define CAM_CC_CAMSS_TOP_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-dispcc.h b/include/dt-bindings/clock/qcom,milos-dispcc.h
new file mode 100644
index 000000000000..c70f23f32f0a
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-dispcc.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_MILOS_H
+
+/* DISP_CC clocks */
+#define DISP_CC_PLL0 0
+#define DISP_CC_MDSS_ACCU_CLK 1
+#define DISP_CC_MDSS_AHB1_CLK 2
+#define DISP_CC_MDSS_AHB_CLK 3
+#define DISP_CC_MDSS_AHB_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_CLK 5
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 7
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 8
+#define DISP_CC_MDSS_DPTX0_AUX_CLK 9
+#define DISP_CC_MDSS_DPTX0_AUX_CLK_SRC 10
+#define DISP_CC_MDSS_DPTX0_CRYPTO_CLK 11
+#define DISP_CC_MDSS_DPTX0_LINK_CLK 12
+#define DISP_CC_MDSS_DPTX0_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_DPTX0_LINK_INTF_CLK 15
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK 16
+#define DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC 17
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK 18
+#define DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC 19
+#define DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK 20
+#define DISP_CC_MDSS_ESC0_CLK 21
+#define DISP_CC_MDSS_ESC0_CLK_SRC 22
+#define DISP_CC_MDSS_MDP1_CLK 23
+#define DISP_CC_MDSS_MDP_CLK 24
+#define DISP_CC_MDSS_MDP_CLK_SRC 25
+#define DISP_CC_MDSS_MDP_LUT1_CLK 26
+#define DISP_CC_MDSS_MDP_LUT_CLK 27
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 28
+#define DISP_CC_MDSS_PCLK0_CLK 29
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 30
+#define DISP_CC_MDSS_RSCC_AHB_CLK 31
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 32
+#define DISP_CC_MDSS_VSYNC1_CLK 33
+#define DISP_CC_MDSS_VSYNC_CLK 34
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 35
+#define DISP_CC_SLEEP_CLK 36
+#define DISP_CC_SLEEP_CLK_SRC 37
+#define DISP_CC_XO_CLK 38
+#define DISP_CC_XO_CLK_SRC 39
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_CORE_INT2_BCR 1
+#define DISP_CC_MDSS_RSCC_BCR 2
+
+/* DISP_CC power domains */
+#define DISP_CC_MDSS_CORE_GDSC 0
+#define DISP_CC_MDSS_CORE_INT2_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-gcc.h b/include/dt-bindings/clock/qcom,milos-gcc.h
new file mode 100644
index 000000000000..a530ca39e1ef
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gcc.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_MILOS_H
+
+/* GCC clocks */
+#define GCC_GPLL0 0
+#define GCC_GPLL0_OUT_EVEN 1
+#define GCC_GPLL2 2
+#define GCC_GPLL4 3
+#define GCC_GPLL6 4
+#define GCC_GPLL7 5
+#define GCC_GPLL9 6
+#define GCC_AGGRE_NOC_PCIE_AXI_CLK 7
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 8
+#define GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK 9
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 10
+#define GCC_BOOT_ROM_AHB_CLK 11
+#define GCC_CAMERA_AHB_CLK 12
+#define GCC_CAMERA_HF_AXI_CLK 13
+#define GCC_CAMERA_HF_XO_CLK 14
+#define GCC_CAMERA_SF_AXI_CLK 15
+#define GCC_CAMERA_SF_XO_CLK 16
+#define GCC_CFG_NOC_PCIE_ANOC_AHB_CLK 17
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 18
+#define GCC_CNOC_PCIE_SF_AXI_CLK 19
+#define GCC_DDRSS_GPU_AXI_CLK 20
+#define GCC_DDRSS_PCIE_SF_QTB_CLK 21
+#define GCC_DISP_AHB_CLK 22
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 23
+#define GCC_DISP_HF_AXI_CLK 24
+#define GCC_DISP_XO_CLK 25
+#define GCC_GP1_CLK 26
+#define GCC_GP1_CLK_SRC 27
+#define GCC_GP2_CLK 28
+#define GCC_GP2_CLK_SRC 29
+#define GCC_GP3_CLK 30
+#define GCC_GP3_CLK_SRC 31
+#define GCC_GPU_CFG_AHB_CLK 32
+#define GCC_GPU_GPLL0_CLK_SRC 33
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 34
+#define GCC_GPU_MEMNOC_GFX_CLK 35
+#define GCC_GPU_SNOC_DVM_GFX_CLK 36
+#define GCC_PCIE_0_AUX_CLK 37
+#define GCC_PCIE_0_AUX_CLK_SRC 38
+#define GCC_PCIE_0_CFG_AHB_CLK 39
+#define GCC_PCIE_0_MSTR_AXI_CLK 40
+#define GCC_PCIE_0_PHY_RCHNG_CLK 41
+#define GCC_PCIE_0_PHY_RCHNG_CLK_SRC 42
+#define GCC_PCIE_0_PIPE_CLK 43
+#define GCC_PCIE_0_PIPE_CLK_SRC 44
+#define GCC_PCIE_0_PIPE_DIV2_CLK 45
+#define GCC_PCIE_0_PIPE_DIV2_CLK_SRC 46
+#define GCC_PCIE_0_SLV_AXI_CLK 47
+#define GCC_PCIE_0_SLV_Q2A_AXI_CLK 48
+#define GCC_PCIE_1_AUX_CLK 49
+#define GCC_PCIE_1_AUX_CLK_SRC 50
+#define GCC_PCIE_1_CFG_AHB_CLK 51
+#define GCC_PCIE_1_MSTR_AXI_CLK 52
+#define GCC_PCIE_1_PHY_RCHNG_CLK 53
+#define GCC_PCIE_1_PHY_RCHNG_CLK_SRC 54
+#define GCC_PCIE_1_PIPE_CLK 55
+#define GCC_PCIE_1_PIPE_CLK_SRC 56
+#define GCC_PCIE_1_PIPE_DIV2_CLK 57
+#define GCC_PCIE_1_PIPE_DIV2_CLK_SRC 58
+#define GCC_PCIE_1_SLV_AXI_CLK 59
+#define GCC_PCIE_1_SLV_Q2A_AXI_CLK 60
+#define GCC_PCIE_RSCC_CFG_AHB_CLK 61
+#define GCC_PCIE_RSCC_XO_CLK 62
+#define GCC_PDM2_CLK 63
+#define GCC_PDM2_CLK_SRC 64
+#define GCC_PDM_AHB_CLK 65
+#define GCC_PDM_XO4_CLK 66
+#define GCC_QMIP_CAMERA_NRT_AHB_CLK 67
+#define GCC_QMIP_CAMERA_RT_AHB_CLK 68
+#define GCC_QMIP_DISP_AHB_CLK 69
+#define GCC_QMIP_GPU_AHB_CLK 70
+#define GCC_QMIP_PCIE_AHB_CLK 71
+#define GCC_QMIP_VIDEO_CV_CPU_AHB_CLK 72
+#define GCC_QMIP_VIDEO_CVP_AHB_CLK 73
+#define GCC_QMIP_VIDEO_V_CPU_AHB_CLK 74
+#define GCC_QMIP_VIDEO_VCODEC_AHB_CLK 75
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 76
+#define GCC_QUPV3_WRAP0_CORE_CLK 77
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK 78
+#define GCC_QUPV3_WRAP0_QSPI_REF_CLK_SRC 79
+#define GCC_QUPV3_WRAP0_S0_CLK 80
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 81
+#define GCC_QUPV3_WRAP0_S1_CLK 82
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 83
+#define GCC_QUPV3_WRAP0_S2_CLK 84
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 85
+#define GCC_QUPV3_WRAP0_S3_CLK 86
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 87
+#define GCC_QUPV3_WRAP0_S4_CLK 88
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 89
+#define GCC_QUPV3_WRAP0_S5_CLK 90
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 91
+#define GCC_QUPV3_WRAP0_S6_CLK 92
+#define GCC_QUPV3_WRAP0_S6_CLK_SRC 93
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 94
+#define GCC_QUPV3_WRAP1_CORE_CLK 95
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK 96
+#define GCC_QUPV3_WRAP1_QSPI_REF_CLK_SRC 97
+#define GCC_QUPV3_WRAP1_S0_CLK 98
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 99
+#define GCC_QUPV3_WRAP1_S1_CLK 100
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 101
+#define GCC_QUPV3_WRAP1_S2_CLK 102
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 103
+#define GCC_QUPV3_WRAP1_S3_CLK 104
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 105
+#define GCC_QUPV3_WRAP1_S4_CLK 106
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 107
+#define GCC_QUPV3_WRAP1_S5_CLK 108
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 109
+#define GCC_QUPV3_WRAP1_S6_CLK 110
+#define GCC_QUPV3_WRAP1_S6_CLK_SRC 111
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 112
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 113
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 114
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 115
+#define GCC_SDCC1_AHB_CLK 116
+#define GCC_SDCC1_APPS_CLK 117
+#define GCC_SDCC1_APPS_CLK_SRC 118
+#define GCC_SDCC1_ICE_CORE_CLK 119
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 120
+#define GCC_SDCC2_AHB_CLK 121
+#define GCC_SDCC2_APPS_CLK 122
+#define GCC_SDCC2_APPS_CLK_SRC 123
+#define GCC_UFS_PHY_AHB_CLK 124
+#define GCC_UFS_PHY_AXI_CLK 125
+#define GCC_UFS_PHY_AXI_CLK_SRC 126
+#define GCC_UFS_PHY_AXI_HW_CTL_CLK 127
+#define GCC_UFS_PHY_ICE_CORE_CLK 128
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 129
+#define GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK 130
+#define GCC_UFS_PHY_PHY_AUX_CLK 131
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 132
+#define GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK 133
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 134
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK_SRC 135
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK 136
+#define GCC_UFS_PHY_RX_SYMBOL_1_CLK_SRC 137
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 138
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK_SRC 139
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 140
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 141
+#define GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK 142
+#define GCC_USB30_PRIM_ATB_CLK 143
+#define GCC_USB30_PRIM_MASTER_CLK 144
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 145
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 146
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 147
+#define GCC_USB30_PRIM_MOCK_UTMI_POSTDIV_CLK_SRC 148
+#define GCC_USB30_PRIM_SLEEP_CLK 149
+#define GCC_USB3_PRIM_PHY_AUX_CLK 150
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 151
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 152
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 153
+#define GCC_USB3_PRIM_PHY_PIPE_CLK_SRC 154
+#define GCC_VIDEO_AHB_CLK 155
+#define GCC_VIDEO_AXI0_CLK 156
+#define GCC_VIDEO_XO_CLK 157
+
+/* GCC resets */
+#define GCC_CAMERA_BCR 0
+#define GCC_DISPLAY_BCR 1
+#define GCC_GPU_BCR 2
+#define GCC_PCIE_0_BCR 3
+#define GCC_PCIE_0_LINK_DOWN_BCR 4
+#define GCC_PCIE_0_NOCSR_COM_PHY_BCR 5
+#define GCC_PCIE_0_PHY_BCR 6
+#define GCC_PCIE_0_PHY_NOCSR_COM_PHY_BCR 7
+#define GCC_PCIE_1_BCR 8
+#define GCC_PCIE_1_LINK_DOWN_BCR 9
+#define GCC_PCIE_1_NOCSR_COM_PHY_BCR 10
+#define GCC_PCIE_1_PHY_BCR 11
+#define GCC_PCIE_1_PHY_NOCSR_COM_PHY_BCR 12
+#define GCC_PCIE_RSCC_BCR 13
+#define GCC_PDM_BCR 14
+#define GCC_QUPV3_WRAPPER_0_BCR 15
+#define GCC_QUPV3_WRAPPER_1_BCR 16
+#define GCC_QUSB2PHY_PRIM_BCR 17
+#define GCC_QUSB2PHY_SEC_BCR 18
+#define GCC_SDCC1_BCR 19
+#define GCC_SDCC2_BCR 20
+#define GCC_UFS_PHY_BCR 21
+#define GCC_USB30_PRIM_BCR 22
+#define GCC_USB3_DP_PHY_PRIM_BCR 23
+#define GCC_USB3_PHY_PRIM_BCR 24
+#define GCC_USB3PHY_PHY_PRIM_BCR 25
+#define GCC_VIDEO_AXI0_CLK_ARES 26
+#define GCC_VIDEO_BCR 27
+
+/* GCC power domains */
+#define PCIE_0_GDSC 0
+#define PCIE_0_PHY_GDSC 1
+#define PCIE_1_GDSC 2
+#define PCIE_1_PHY_GDSC 3
+#define UFS_PHY_GDSC 4
+#define UFS_MEM_PHY_GDSC 5
+#define USB30_PRIM_GDSC 6
+#define USB3_PHY_GDSC 7
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-gpucc.h b/include/dt-bindings/clock/qcom,milos-gpucc.h
new file mode 100644
index 000000000000..6ff1925d409f
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-gpucc.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_MILOS_H
+
+/* GPU_CC clocks */
+#define GPU_CC_PLL0 0
+#define GPU_CC_PLL0_OUT_EVEN 1
+#define GPU_CC_AHB_CLK 2
+#define GPU_CC_CB_CLK 3
+#define GPU_CC_CX_ACCU_SHIFT_CLK 4
+#define GPU_CC_CX_FF_CLK 5
+#define GPU_CC_CX_GMU_CLK 6
+#define GPU_CC_CXO_AON_CLK 7
+#define GPU_CC_CXO_CLK 8
+#define GPU_CC_DEMET_CLK 9
+#define GPU_CC_DEMET_DIV_CLK_SRC 10
+#define GPU_CC_DPM_CLK 11
+#define GPU_CC_FF_CLK_SRC 12
+#define GPU_CC_FREQ_MEASURE_CLK 13
+#define GPU_CC_GMU_CLK_SRC 14
+#define GPU_CC_GX_ACCU_SHIFT_CLK 15
+#define GPU_CC_GX_ACD_AHB_FF_CLK 16
+#define GPU_CC_GX_AHB_FF_CLK 17
+#define GPU_CC_GX_GMU_CLK 18
+#define GPU_CC_GX_RCG_AHB_FF_CLK 19
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 20
+#define GPU_CC_HUB_AON_CLK 21
+#define GPU_CC_HUB_CLK_SRC 22
+#define GPU_CC_HUB_CX_INT_CLK 23
+#define GPU_CC_HUB_DIV_CLK_SRC 24
+#define GPU_CC_MEMNOC_GFX_CLK 25
+#define GPU_CC_RSCC_HUB_AON_CLK 26
+#define GPU_CC_RSCC_XO_AON_CLK 27
+#define GPU_CC_SLEEP_CLK 28
+#define GPU_CC_XO_CLK_SRC 29
+#define GPU_CC_XO_DIV_CLK_SRC 30
+
+/* GPU_CC resets */
+#define GPU_CC_CB_BCR 0
+#define GPU_CC_CX_BCR 1
+#define GPU_CC_FAST_HUB_BCR 2
+#define GPU_CC_FF_BCR 3
+#define GPU_CC_GMU_BCR 4
+#define GPU_CC_GX_BCR 5
+#define GPU_CC_RBCPR_BCR 6
+#define GPU_CC_XO_BCR 7
+
+/* GPU_CC power domains */
+#define GPU_CC_CX_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,milos-videocc.h b/include/dt-bindings/clock/qcom,milos-videocc.h
new file mode 100644
index 000000000000..3544db81ffae
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,milos-videocc.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2025, Luca Weiss <luca.weiss@fairphone.com>
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_MILOS_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_PLL0 0
+#define VIDEO_CC_AHB_CLK 1
+#define VIDEO_CC_AHB_CLK_SRC 2
+#define VIDEO_CC_MVS0_CLK 3
+#define VIDEO_CC_MVS0_CLK_SRC 4
+#define VIDEO_CC_MVS0_DIV_CLK_SRC 5
+#define VIDEO_CC_MVS0_SHIFT_CLK 6
+#define VIDEO_CC_MVS0C_CLK 7
+#define VIDEO_CC_MVS0C_DIV2_DIV_CLK_SRC 8
+#define VIDEO_CC_MVS0C_SHIFT_CLK 9
+#define VIDEO_CC_SLEEP_CLK 10
+#define VIDEO_CC_SLEEP_CLK_SRC 11
+#define VIDEO_CC_XO_CLK 12
+#define VIDEO_CC_XO_CLK_SRC 13
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_MVS0_BCR 1
+#define VIDEO_CC_MVS0C_CLK_ARES 2
+#define VIDEO_CC_MVS0C_BCR 3
+
+/* VIDEO_CC power domains */
+#define VIDEO_CC_MVS0_GDSC 0
+#define VIDEO_CC_MVS0C_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-camcc.h b/include/dt-bindings/clock/qcom,qcs615-camcc.h
new file mode 100644
index 000000000000..aec57dddc067
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-camcc.h
@@ -0,0 +1,110 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_CAM_CC_QCS615_H
+
+/* CAM_CC clocks */
+#define CAM_CC_BPS_AHB_CLK 0
+#define CAM_CC_BPS_AREG_CLK 1
+#define CAM_CC_BPS_AXI_CLK 2
+#define CAM_CC_BPS_CLK 3
+#define CAM_CC_BPS_CLK_SRC 4
+#define CAM_CC_CAMNOC_ATB_CLK 5
+#define CAM_CC_CAMNOC_AXI_CLK 6
+#define CAM_CC_CCI_CLK 7
+#define CAM_CC_CCI_CLK_SRC 8
+#define CAM_CC_CORE_AHB_CLK 9
+#define CAM_CC_CPAS_AHB_CLK 10
+#define CAM_CC_CPHY_RX_CLK_SRC 11
+#define CAM_CC_CSI0PHYTIMER_CLK 12
+#define CAM_CC_CSI0PHYTIMER_CLK_SRC 13
+#define CAM_CC_CSI1PHYTIMER_CLK 14
+#define CAM_CC_CSI1PHYTIMER_CLK_SRC 15
+#define CAM_CC_CSI2PHYTIMER_CLK 16
+#define CAM_CC_CSI2PHYTIMER_CLK_SRC 17
+#define CAM_CC_CSIPHY0_CLK 18
+#define CAM_CC_CSIPHY1_CLK 19
+#define CAM_CC_CSIPHY2_CLK 20
+#define CAM_CC_FAST_AHB_CLK_SRC 21
+#define CAM_CC_ICP_ATB_CLK 22
+#define CAM_CC_ICP_CLK 23
+#define CAM_CC_ICP_CLK_SRC 24
+#define CAM_CC_ICP_CTI_CLK 25
+#define CAM_CC_ICP_TS_CLK 26
+#define CAM_CC_IFE_0_AXI_CLK 27
+#define CAM_CC_IFE_0_CLK 28
+#define CAM_CC_IFE_0_CLK_SRC 29
+#define CAM_CC_IFE_0_CPHY_RX_CLK 30
+#define CAM_CC_IFE_0_CSID_CLK 31
+#define CAM_CC_IFE_0_CSID_CLK_SRC 32
+#define CAM_CC_IFE_0_DSP_CLK 33
+#define CAM_CC_IFE_1_AXI_CLK 34
+#define CAM_CC_IFE_1_CLK 35
+#define CAM_CC_IFE_1_CLK_SRC 36
+#define CAM_CC_IFE_1_CPHY_RX_CLK 37
+#define CAM_CC_IFE_1_CSID_CLK 38
+#define CAM_CC_IFE_1_CSID_CLK_SRC 39
+#define CAM_CC_IFE_1_DSP_CLK 40
+#define CAM_CC_IFE_LITE_CLK 41
+#define CAM_CC_IFE_LITE_CLK_SRC 42
+#define CAM_CC_IFE_LITE_CPHY_RX_CLK 43
+#define CAM_CC_IFE_LITE_CSID_CLK 44
+#define CAM_CC_IFE_LITE_CSID_CLK_SRC 45
+#define CAM_CC_IPE_0_AHB_CLK 46
+#define CAM_CC_IPE_0_AREG_CLK 47
+#define CAM_CC_IPE_0_AXI_CLK 48
+#define CAM_CC_IPE_0_CLK 49
+#define CAM_CC_IPE_0_CLK_SRC 50
+#define CAM_CC_JPEG_CLK 51
+#define CAM_CC_JPEG_CLK_SRC 52
+#define CAM_CC_LRME_CLK 53
+#define CAM_CC_LRME_CLK_SRC 54
+#define CAM_CC_MCLK0_CLK 55
+#define CAM_CC_MCLK0_CLK_SRC 56
+#define CAM_CC_MCLK1_CLK 57
+#define CAM_CC_MCLK1_CLK_SRC 58
+#define CAM_CC_MCLK2_CLK 59
+#define CAM_CC_MCLK2_CLK_SRC 60
+#define CAM_CC_MCLK3_CLK 61
+#define CAM_CC_MCLK3_CLK_SRC 62
+#define CAM_CC_PLL0 63
+#define CAM_CC_PLL1 64
+#define CAM_CC_PLL2 65
+#define CAM_CC_PLL2_OUT_AUX2 66
+#define CAM_CC_PLL3 67
+#define CAM_CC_SLOW_AHB_CLK_SRC 68
+#define CAM_CC_SOC_AHB_CLK 69
+#define CAM_CC_SYS_TMR_CLK 70
+
+/* CAM_CC power domains */
+#define BPS_GDSC 0
+#define IFE_0_GDSC 1
+#define IFE_1_GDSC 2
+#define IPE_0_GDSC 3
+#define TITAN_TOP_GDSC 4
+
+/* CAM_CC resets */
+#define CAM_CC_BPS_BCR 0
+#define CAM_CC_CAMNOC_BCR 1
+#define CAM_CC_CCI_BCR 2
+#define CAM_CC_CPAS_BCR 3
+#define CAM_CC_CSI0PHY_BCR 4
+#define CAM_CC_CSI1PHY_BCR 5
+#define CAM_CC_CSI2PHY_BCR 6
+#define CAM_CC_ICP_BCR 7
+#define CAM_CC_IFE_0_BCR 8
+#define CAM_CC_IFE_1_BCR 9
+#define CAM_CC_IFE_LITE_BCR 10
+#define CAM_CC_IPE_0_BCR 11
+#define CAM_CC_JPEG_BCR 12
+#define CAM_CC_LRME_BCR 13
+#define CAM_CC_MCLK0_BCR 14
+#define CAM_CC_MCLK1_BCR 15
+#define CAM_CC_MCLK2_BCR 16
+#define CAM_CC_MCLK3_BCR 17
+#define CAM_CC_TITAN_TOP_BCR 18
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-dispcc.h b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
new file mode 100644
index 000000000000..9a29945c5762
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-dispcc.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_QCS615_H
+
+/* DISP_CC clocks */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AHB_CLK_SRC 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 4
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 5
+#define DISP_CC_MDSS_DP_AUX_CLK 6
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 7
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 8
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 9
+#define DISP_CC_MDSS_DP_LINK_CLK 10
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 12
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 13
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 14
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 17
+#define DISP_CC_MDSS_ESC0_CLK 18
+#define DISP_CC_MDSS_ESC0_CLK_SRC 19
+#define DISP_CC_MDSS_MDP_CLK 20
+#define DISP_CC_MDSS_MDP_CLK_SRC 21
+#define DISP_CC_MDSS_MDP_LUT_CLK 22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK 24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 25
+#define DISP_CC_MDSS_ROT_CLK 26
+#define DISP_CC_MDSS_ROT_CLK_SRC 27
+#define DISP_CC_MDSS_RSCC_AHB_CLK 28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK 30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 31
+#define DISP_CC_PLL0 32
+#define DISP_CC_XO_CLK 33
+
+/* DISP_CC power domains */
+#define MDSS_CORE_GDSC 0
+
+/* DISP_CC resets */
+#define DISP_CC_MDSS_CORE_BCR 0
+#define DISP_CC_MDSS_RSCC_BCR 1
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-gpucc.h b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
new file mode 100644
index 000000000000..6d8394b90d59
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-gpucc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_QCS615_H
+
+/* GPU_CC clocks */
+#define CRC_DIV_PLL0 0
+#define CRC_DIV_PLL1 1
+#define GPU_CC_PLL0 2
+#define GPU_CC_PLL1 3
+#define GPU_CC_CRC_AHB_CLK 4
+#define GPU_CC_CX_GFX3D_CLK 5
+#define GPU_CC_CX_GFX3D_SLV_CLK 6
+#define GPU_CC_CX_GMU_CLK 7
+#define GPU_CC_CX_SNOC_DVM_CLK 8
+#define GPU_CC_CXO_AON_CLK 9
+#define GPU_CC_CXO_CLK 10
+#define GPU_CC_GMU_CLK_SRC 11
+#define GPU_CC_GX_GFX3D_CLK 12
+#define GPU_CC_GX_GFX3D_CLK_SRC 13
+#define GPU_CC_GX_GMU_CLK 14
+#define GPU_CC_HLOS1_VOTE_GPU_SMMU_CLK 15
+#define GPU_CC_SLEEP_CLK 16
+
+/* GPU_CC power domains */
+#define CX_GDSC 0
+#define GX_GDSC 1
+
+/* GPU_CC resets */
+#define GPU_CC_CX_BCR 0
+#define GPU_CC_GFX3D_AON_BCR 1
+#define GPU_CC_GMU_BCR 2
+#define GPU_CC_GX_BCR 3
+#define GPU_CC_XO_BCR 4
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,qcs615-videocc.h b/include/dt-bindings/clock/qcom,qcs615-videocc.h
new file mode 100644
index 000000000000..0ca3efb21103
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,qcs615-videocc.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_QCS615_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_CC_SLEEP_CLK 0
+#define VIDEO_CC_SLEEP_CLK_SRC 1
+#define VIDEO_CC_VCODEC0_AXI_CLK 2
+#define VIDEO_CC_VCODEC0_CORE_CLK 3
+#define VIDEO_CC_VENUS_AHB_CLK 4
+#define VIDEO_CC_VENUS_CLK_SRC 5
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 6
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 7
+#define VIDEO_CC_XO_CLK 8
+#define VIDEO_PLL0 9
+
+/* VIDEO_CC power domains */
+#define VCODEC0_GDSC 0
+#define VENUS_GDSC 1
+
+/* VIDEO_CC resets */
+#define VIDEO_CC_INTERFACE_BCR 0
+#define VIDEO_CC_VCODEC0_BCR 1
+#define VIDEO_CC_VENUS_BCR 2
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,x1e80100-gcc.h b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
index 24ba9e2a5cf6..710c340f24a5 100644
--- a/include/dt-bindings/clock/qcom,x1e80100-gcc.h
+++ b/include/dt-bindings/clock/qcom,x1e80100-gcc.h
@@ -482,4 +482,6 @@
#define GCC_USB_1_PHY_BCR 85
#define GCC_USB_2_PHY_BCR 86
#define GCC_VIDEO_BCR 87
+#define GCC_VIDEO_AXI0_CLK_ARES 88
+#define GCC_VIDEO_AXI1_CLK_ARES 89
#endif
diff --git a/include/dt-bindings/clock/r9a07g043-cpg.h b/include/dt-bindings/clock/r9a07g043-cpg.h
index 131993343777..e1f65f1928cf 100644
--- a/include/dt-bindings/clock/r9a07g043-cpg.h
+++ b/include/dt-bindings/clock/r9a07g043-cpg.h
@@ -200,57 +200,4 @@
#define R9A07G043_AX45MP_CORE0_RESETN 78 /* RZ/Five Only */
#define R9A07G043_IAX45_RESETN 79 /* RZ/Five Only */
-/* Power domain IDs. */
-#define R9A07G043_PD_ALWAYS_ON 0
-#define R9A07G043_PD_GIC 1 /* RZ/G2UL Only */
-#define R9A07G043_PD_IA55 2 /* RZ/G2UL Only */
-#define R9A07G043_PD_MHU 3 /* RZ/G2UL Only */
-#define R9A07G043_PD_CORESIGHT 4 /* RZ/G2UL Only */
-#define R9A07G043_PD_SYC 5 /* RZ/G2UL Only */
-#define R9A07G043_PD_DMAC 6
-#define R9A07G043_PD_GTM0 7
-#define R9A07G043_PD_GTM1 8
-#define R9A07G043_PD_GTM2 9
-#define R9A07G043_PD_MTU 10
-#define R9A07G043_PD_POE3 11
-#define R9A07G043_PD_WDT0 12
-#define R9A07G043_PD_SPI 13
-#define R9A07G043_PD_SDHI0 14
-#define R9A07G043_PD_SDHI1 15
-#define R9A07G043_PD_ISU 16 /* RZ/G2UL Only */
-#define R9A07G043_PD_CRU 17 /* RZ/G2UL Only */
-#define R9A07G043_PD_LCDC 18 /* RZ/G2UL Only */
-#define R9A07G043_PD_SSI0 19
-#define R9A07G043_PD_SSI1 20
-#define R9A07G043_PD_SSI2 21
-#define R9A07G043_PD_SSI3 22
-#define R9A07G043_PD_SRC 23
-#define R9A07G043_PD_USB0 24
-#define R9A07G043_PD_USB1 25
-#define R9A07G043_PD_USB_PHY 26
-#define R9A07G043_PD_ETHER0 27
-#define R9A07G043_PD_ETHER1 28
-#define R9A07G043_PD_I2C0 29
-#define R9A07G043_PD_I2C1 30
-#define R9A07G043_PD_I2C2 31
-#define R9A07G043_PD_I2C3 32
-#define R9A07G043_PD_SCIF0 33
-#define R9A07G043_PD_SCIF1 34
-#define R9A07G043_PD_SCIF2 35
-#define R9A07G043_PD_SCIF3 36
-#define R9A07G043_PD_SCIF4 37
-#define R9A07G043_PD_SCI0 38
-#define R9A07G043_PD_SCI1 39
-#define R9A07G043_PD_IRDA 40
-#define R9A07G043_PD_RSPI0 41
-#define R9A07G043_PD_RSPI1 42
-#define R9A07G043_PD_RSPI2 43
-#define R9A07G043_PD_CANFD 44
-#define R9A07G043_PD_ADC 45
-#define R9A07G043_PD_TSU 46
-#define R9A07G043_PD_PLIC 47 /* RZ/Five Only */
-#define R9A07G043_PD_IAX45 48 /* RZ/Five Only */
-#define R9A07G043_PD_NCEPLDM 49 /* RZ/Five Only */
-#define R9A07G043_PD_NCEPLMT 50 /* RZ/Five Only */
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G043_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g044-cpg.h b/include/dt-bindings/clock/r9a07g044-cpg.h
index e209f96f92b7..0bb17ff1a01a 100644
--- a/include/dt-bindings/clock/r9a07g044-cpg.h
+++ b/include/dt-bindings/clock/r9a07g044-cpg.h
@@ -217,62 +217,4 @@
#define R9A07G044_ADC_ADRST_N 82
#define R9A07G044_TSU_PRESETN 83
-/* Power domain IDs. */
-#define R9A07G044_PD_ALWAYS_ON 0
-#define R9A07G044_PD_GIC 1
-#define R9A07G044_PD_IA55 2
-#define R9A07G044_PD_MHU 3
-#define R9A07G044_PD_CORESIGHT 4
-#define R9A07G044_PD_SYC 5
-#define R9A07G044_PD_DMAC 6
-#define R9A07G044_PD_GTM0 7
-#define R9A07G044_PD_GTM1 8
-#define R9A07G044_PD_GTM2 9
-#define R9A07G044_PD_MTU 10
-#define R9A07G044_PD_POE3 11
-#define R9A07G044_PD_GPT 12
-#define R9A07G044_PD_POEGA 13
-#define R9A07G044_PD_POEGB 14
-#define R9A07G044_PD_POEGC 15
-#define R9A07G044_PD_POEGD 16
-#define R9A07G044_PD_WDT0 17
-#define R9A07G044_PD_WDT1 18
-#define R9A07G044_PD_SPI 19
-#define R9A07G044_PD_SDHI0 20
-#define R9A07G044_PD_SDHI1 21
-#define R9A07G044_PD_3DGE 22
-#define R9A07G044_PD_ISU 23
-#define R9A07G044_PD_VCPL4 24
-#define R9A07G044_PD_CRU 25
-#define R9A07G044_PD_MIPI_DSI 26
-#define R9A07G044_PD_LCDC 27
-#define R9A07G044_PD_SSI0 28
-#define R9A07G044_PD_SSI1 29
-#define R9A07G044_PD_SSI2 30
-#define R9A07G044_PD_SSI3 31
-#define R9A07G044_PD_SRC 32
-#define R9A07G044_PD_USB0 33
-#define R9A07G044_PD_USB1 34
-#define R9A07G044_PD_USB_PHY 35
-#define R9A07G044_PD_ETHER0 36
-#define R9A07G044_PD_ETHER1 37
-#define R9A07G044_PD_I2C0 38
-#define R9A07G044_PD_I2C1 39
-#define R9A07G044_PD_I2C2 40
-#define R9A07G044_PD_I2C3 41
-#define R9A07G044_PD_SCIF0 42
-#define R9A07G044_PD_SCIF1 43
-#define R9A07G044_PD_SCIF2 44
-#define R9A07G044_PD_SCIF3 45
-#define R9A07G044_PD_SCIF4 46
-#define R9A07G044_PD_SCI0 47
-#define R9A07G044_PD_SCI1 48
-#define R9A07G044_PD_IRDA 49
-#define R9A07G044_PD_RSPI0 50
-#define R9A07G044_PD_RSPI1 51
-#define R9A07G044_PD_RSPI2 52
-#define R9A07G044_PD_CANFD 53
-#define R9A07G044_PD_ADC 54
-#define R9A07G044_PD_TSU 55
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G044_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a07g054-cpg.h b/include/dt-bindings/clock/r9a07g054-cpg.h
index 2c99f89397c4..43f4dbda872c 100644
--- a/include/dt-bindings/clock/r9a07g054-cpg.h
+++ b/include/dt-bindings/clock/r9a07g054-cpg.h
@@ -226,62 +226,4 @@
#define R9A07G054_TSU_PRESETN 83
#define R9A07G054_STPAI_ARESETN 84
-/* Power domain IDs. */
-#define R9A07G054_PD_ALWAYS_ON 0
-#define R9A07G054_PD_GIC 1
-#define R9A07G054_PD_IA55 2
-#define R9A07G054_PD_MHU 3
-#define R9A07G054_PD_CORESIGHT 4
-#define R9A07G054_PD_SYC 5
-#define R9A07G054_PD_DMAC 6
-#define R9A07G054_PD_GTM0 7
-#define R9A07G054_PD_GTM1 8
-#define R9A07G054_PD_GTM2 9
-#define R9A07G054_PD_MTU 10
-#define R9A07G054_PD_POE3 11
-#define R9A07G054_PD_GPT 12
-#define R9A07G054_PD_POEGA 13
-#define R9A07G054_PD_POEGB 14
-#define R9A07G054_PD_POEGC 15
-#define R9A07G054_PD_POEGD 16
-#define R9A07G054_PD_WDT0 17
-#define R9A07G054_PD_WDT1 18
-#define R9A07G054_PD_SPI 19
-#define R9A07G054_PD_SDHI0 20
-#define R9A07G054_PD_SDHI1 21
-#define R9A07G054_PD_3DGE 22
-#define R9A07G054_PD_ISU 23
-#define R9A07G054_PD_VCPL4 24
-#define R9A07G054_PD_CRU 25
-#define R9A07G054_PD_MIPI_DSI 26
-#define R9A07G054_PD_LCDC 27
-#define R9A07G054_PD_SSI0 28
-#define R9A07G054_PD_SSI1 29
-#define R9A07G054_PD_SSI2 30
-#define R9A07G054_PD_SSI3 31
-#define R9A07G054_PD_SRC 32
-#define R9A07G054_PD_USB0 33
-#define R9A07G054_PD_USB1 34
-#define R9A07G054_PD_USB_PHY 35
-#define R9A07G054_PD_ETHER0 36
-#define R9A07G054_PD_ETHER1 37
-#define R9A07G054_PD_I2C0 38
-#define R9A07G054_PD_I2C1 39
-#define R9A07G054_PD_I2C2 40
-#define R9A07G054_PD_I2C3 41
-#define R9A07G054_PD_SCIF0 42
-#define R9A07G054_PD_SCIF1 43
-#define R9A07G054_PD_SCIF2 44
-#define R9A07G054_PD_SCIF3 45
-#define R9A07G054_PD_SCIF4 46
-#define R9A07G054_PD_SCI0 47
-#define R9A07G054_PD_SCI1 48
-#define R9A07G054_PD_IRDA 49
-#define R9A07G054_PD_RSPI0 50
-#define R9A07G054_PD_RSPI1 51
-#define R9A07G054_PD_RSPI2 52
-#define R9A07G054_PD_CANFD 53
-#define R9A07G054_PD_ADC 54
-#define R9A07G054_PD_TSU 55
-
#endif /* __DT_BINDINGS_CLOCK_R9A07G054_CPG_H__ */
diff --git a/include/dt-bindings/clock/r9a08g045-cpg.h b/include/dt-bindings/clock/r9a08g045-cpg.h
index 311521fe4b59..410725b778a8 100644
--- a/include/dt-bindings/clock/r9a08g045-cpg.h
+++ b/include/dt-bindings/clock/r9a08g045-cpg.h
@@ -239,75 +239,4 @@
#define R9A08G045_I3C_PRESETN 92
#define R9A08G045_VBAT_BRESETN 93
-/* Power domain IDs. */
-#define R9A08G045_PD_ALWAYS_ON 0
-#define R9A08G045_PD_GIC 1
-#define R9A08G045_PD_IA55 2
-#define R9A08G045_PD_MHU 3
-#define R9A08G045_PD_CORESIGHT 4
-#define R9A08G045_PD_SYC 5
-#define R9A08G045_PD_DMAC 6
-#define R9A08G045_PD_GTM0 7
-#define R9A08G045_PD_GTM1 8
-#define R9A08G045_PD_GTM2 9
-#define R9A08G045_PD_GTM3 10
-#define R9A08G045_PD_GTM4 11
-#define R9A08G045_PD_GTM5 12
-#define R9A08G045_PD_GTM6 13
-#define R9A08G045_PD_GTM7 14
-#define R9A08G045_PD_MTU 15
-#define R9A08G045_PD_POE3 16
-#define R9A08G045_PD_GPT 17
-#define R9A08G045_PD_POEGA 18
-#define R9A08G045_PD_POEGB 19
-#define R9A08G045_PD_POEGC 20
-#define R9A08G045_PD_POEGD 21
-#define R9A08G045_PD_WDT0 22
-#define R9A08G045_PD_XSPI 23
-#define R9A08G045_PD_SDHI0 24
-#define R9A08G045_PD_SDHI1 25
-#define R9A08G045_PD_SDHI2 26
-#define R9A08G045_PD_SSI0 27
-#define R9A08G045_PD_SSI1 28
-#define R9A08G045_PD_SSI2 29
-#define R9A08G045_PD_SSI3 30
-#define R9A08G045_PD_SRC 31
-#define R9A08G045_PD_USB0 32
-#define R9A08G045_PD_USB1 33
-#define R9A08G045_PD_USB_PHY 34
-#define R9A08G045_PD_ETHER0 35
-#define R9A08G045_PD_ETHER1 36
-#define R9A08G045_PD_I2C0 37
-#define R9A08G045_PD_I2C1 38
-#define R9A08G045_PD_I2C2 39
-#define R9A08G045_PD_I2C3 40
-#define R9A08G045_PD_SCIF0 41
-#define R9A08G045_PD_SCIF1 42
-#define R9A08G045_PD_SCIF2 43
-#define R9A08G045_PD_SCIF3 44
-#define R9A08G045_PD_SCIF4 45
-#define R9A08G045_PD_SCIF5 46
-#define R9A08G045_PD_SCI0 47
-#define R9A08G045_PD_SCI1 48
-#define R9A08G045_PD_IRDA 49
-#define R9A08G045_PD_RSPI0 50
-#define R9A08G045_PD_RSPI1 51
-#define R9A08G045_PD_RSPI2 52
-#define R9A08G045_PD_RSPI3 53
-#define R9A08G045_PD_RSPI4 54
-#define R9A08G045_PD_CANFD 55
-#define R9A08G045_PD_ADC 56
-#define R9A08G045_PD_TSU 57
-#define R9A08G045_PD_OCTA 58
-#define R9A08G045_PD_PDM 59
-#define R9A08G045_PD_PCI 60
-#define R9A08G045_PD_SPDIF 61
-#define R9A08G045_PD_I3C 62
-#define R9A08G045_PD_VBAT 63
-
-#define R9A08G045_PD_DDR 64
-#define R9A08G045_PD_TZCDDR 65
-#define R9A08G045_PD_OTFDE_DDR 66
-#define R9A08G045_PD_RTC 67
-
#endif /* __DT_BINDINGS_CLOCK_R9A08G045_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
new file mode 100644
index 000000000000..7ecc4f0b235a
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g077-cpg-mssr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G077 CPG Core Clocks */
+#define R9A09G077_CLK_CA55C0 0
+#define R9A09G077_CLK_CA55C1 1
+#define R9A09G077_CLK_CA55C2 2
+#define R9A09G077_CLK_CA55C3 3
+#define R9A09G077_CLK_CA55S 4
+#define R9A09G077_CLK_CR52_CPU0 5
+#define R9A09G077_CLK_CR52_CPU1 6
+#define R9A09G077_CLK_CKIO 7
+#define R9A09G077_CLK_PCLKAH 8
+#define R9A09G077_CLK_PCLKAM 9
+#define R9A09G077_CLK_PCLKAL 10
+#define R9A09G077_CLK_PCLKGPTL 11
+#define R9A09G077_CLK_PCLKH 12
+#define R9A09G077_CLK_PCLKM 13
+#define R9A09G077_CLK_PCLKL 14
+#define R9A09G077_SDHI_CLKHS 15
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G077_CPG_H__ */
diff --git a/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
new file mode 100644
index 000000000000..925e57703925
--- /dev/null
+++ b/include/dt-bindings/clock/renesas,r9a09g087-cpg-mssr.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+ *
+ * Copyright (C) 2025 Renesas Electronics Corp.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+#define __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* R9A09G087 CPG Core Clocks */
+#define R9A09G087_CLK_CA55C0 0
+#define R9A09G087_CLK_CA55C1 1
+#define R9A09G087_CLK_CA55C2 2
+#define R9A09G087_CLK_CA55C3 3
+#define R9A09G087_CLK_CA55S 4
+#define R9A09G087_CLK_CR52_CPU0 5
+#define R9A09G087_CLK_CR52_CPU1 6
+#define R9A09G087_CLK_CKIO 7
+#define R9A09G087_CLK_PCLKAH 8
+#define R9A09G087_CLK_PCLKAM 9
+#define R9A09G087_CLK_PCLKAL 10
+#define R9A09G087_CLK_PCLKGPTL 11
+#define R9A09G087_CLK_PCLKH 12
+#define R9A09G087_CLK_PCLKM 13
+#define R9A09G087_CLK_PCLKL 14
+#define R9A09G087_SDHI_CLKHS 15
+
+#endif /* __DT_BINDINGS_CLOCK_RENESAS_R9A09G087_CPG_H__ */
diff --git a/include/dt-bindings/clock/samsung,exynosautov920.h b/include/dt-bindings/clock/samsung,exynosautov920.h
index 5e6896e9627f..93e6233d1358 100644
--- a/include/dt-bindings/clock/samsung,exynosautov920.h
+++ b/include/dt-bindings/clock/samsung,exynosautov920.h
@@ -286,4 +286,13 @@
#define CLK_MOUT_HSI1_USBDRD_USER 3
#define CLK_MOUT_HSI1_USBDRD 4
+/* CMU_HSI2 */
+#define FOUT_PLL_ETH 1
+#define CLK_MOUT_HSI2_NOC_UFS_USER 2
+#define CLK_MOUT_HSI2_UFS_EMBD_USER 3
+#define CLK_MOUT_HSI2_ETHERNET 4
+#define CLK_MOUT_HSI2_ETHERNET_USER 5
+#define CLK_DOUT_HSI2_ETHERNET 6
+#define CLK_DOUT_HSI2_ETHERNET_PTP 7
+
#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV920_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index d9b7bac30953..f15bcee7c928 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -240,6 +240,7 @@
#define RPMH_REGULATOR_LEVEL_TURBO_L2 432
#define RPMH_REGULATOR_LEVEL_TURBO_L3 448
#define RPMH_REGULATOR_LEVEL_TURBO_L4 452
+#define RPMH_REGULATOR_LEVEL_TURBO_L5 456
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO 464
#define RPMH_REGULATOR_LEVEL_SUPER_TURBO_NO_CPR 480
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 4a34f7f0a864..404883c7af6e 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -38,6 +38,7 @@
enum vgic_type {
VGIC_V2, /* Good ol' GICv2 */
VGIC_V3, /* New fancy GICv3 */
+ VGIC_V5, /* Newer, fancier GICv5 */
};
/* same for all guests, as depending only on the _host's_ GIC model */
@@ -77,9 +78,12 @@ struct vgic_global {
/* Pseudo GICv3 from outer space */
bool no_hw_deactivation;
- /* GIC system register CPU interface */
+ /* GICv3 system register CPU interface */
struct static_key_false gicv3_cpuif;
+ /* GICv3 compat mode on a GICv5 host */
+ bool has_gcie_v3_compat;
+
u32 ich_vtr_el2;
};
@@ -264,6 +268,9 @@ struct vgic_dist {
/* distributor enabled */
bool enabled;
+ /* Supports SGIs without active state */
+ bool nassgicap;
+
/* Wants SGIs without active state */
bool nassgireq;
@@ -434,7 +441,7 @@ struct kvm_kernel_irq_routing_entry;
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
-int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
+void kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq);
int vgic_v4_load(struct kvm_vcpu *vcpu);
void vgic_v4_commit(struct kvm_vcpu *vcpu);
diff --git a/include/linux/adi-axi-common.h b/include/linux/adi-axi-common.h
new file mode 100644
index 000000000000..f64f4ad4beda
--- /dev/null
+++ b/include/linux/adi-axi-common.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Analog Devices AXI common registers & definitions
+ *
+ * Copyright 2019 Analog Devices Inc.
+ *
+ * https://wiki.analog.com/resources/fpga/docs/axi_ip
+ * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
+ */
+
+#ifndef ADI_AXI_COMMON_H_
+#define ADI_AXI_COMMON_H_
+
+#define ADI_AXI_REG_VERSION 0x0000
+#define ADI_AXI_REG_FPGA_INFO 0x001C
+
+#define ADI_AXI_PCORE_VER(major, minor, patch) \
+ (((major) << 16) | ((minor) << 8) | (patch))
+
+#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
+#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
+#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
+
+#define ADI_AXI_INFO_FPGA_TECH(info) (((info) >> 24) & 0xff)
+#define ADI_AXI_INFO_FPGA_FAMILY(info) (((info) >> 16) & 0xff)
+#define ADI_AXI_INFO_FPGA_SPEED_GRADE(info) (((info) >> 8) & 0xff)
+
+enum adi_axi_fpga_technology {
+ ADI_AXI_FPGA_TECH_UNKNOWN = 0,
+ ADI_AXI_FPGA_TECH_SERIES7,
+ ADI_AXI_FPGA_TECH_ULTRASCALE,
+ ADI_AXI_FPGA_TECH_ULTRASCALE_PLUS,
+};
+
+enum adi_axi_fpga_family {
+ ADI_AXI_FPGA_FAMILY_UNKNOWN = 0,
+ ADI_AXI_FPGA_FAMILY_ARTIX,
+ ADI_AXI_FPGA_FAMILY_KINTEX,
+ ADI_AXI_FPGA_FAMILY_VIRTEX,
+ ADI_AXI_FPGA_FAMILY_ZYNQ,
+};
+
+enum adi_axi_fpga_speed_grade {
+ ADI_AXI_FPGA_SPEED_UNKNOWN = 0,
+ ADI_AXI_FPGA_SPEED_1 = 10,
+ ADI_AXI_FPGA_SPEED_1L = 11,
+ ADI_AXI_FPGA_SPEED_1H = 12,
+ ADI_AXI_FPGA_SPEED_1HV = 13,
+ ADI_AXI_FPGA_SPEED_1LV = 14,
+ ADI_AXI_FPGA_SPEED_2 = 20,
+ ADI_AXI_FPGA_SPEED_2L = 21,
+ ADI_AXI_FPGA_SPEED_2LV = 22,
+ ADI_AXI_FPGA_SPEED_3 = 30,
+};
+
+#endif /* ADI_AXI_COMMON_H_ */
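For reference, a minimal decode sketch built on the helpers above; the MMIO base and the my_axi_report() wrapper are assumptions for illustration, not part of this patch:

    #include <linux/adi-axi-common.h>
    #include <linux/io.h>

    /* Read and decode the version and FPGA-info registers. */
    static void my_axi_report(void __iomem *base)
    {
    	u32 version = readl(base + ADI_AXI_REG_VERSION);
    	u32 info = readl(base + ADI_AXI_REG_FPGA_INFO);

    	pr_info("AXI pcore v%u.%u.%u, FPGA family %u, speed grade %u\n",
    		ADI_AXI_PCORE_VER_MAJOR(version),
    		ADI_AXI_PCORE_VER_MINOR(version),
    		ADI_AXI_PCORE_VER_PATCH(version),
    		ADI_AXI_INFO_FPGA_FAMILY(info),
    		ADI_AXI_INFO_FPGA_SPEED_GRADE(info));
    }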
diff --git a/include/linux/alloc_tag.h b/include/linux/alloc_tag.h
index 8f7931eb7d16..9ef2633e2c08 100644
--- a/include/linux/alloc_tag.h
+++ b/include/linux/alloc_tag.h
@@ -88,7 +88,7 @@ static inline struct alloc_tag *ct_to_alloc_tag(struct codetag *ct)
return container_of(ct, struct alloc_tag, ct);
}
-#ifdef ARCH_NEEDS_WEAK_PER_CPU
+#if defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)
/*
* When percpu variables are required to be defined as weak, static percpu
* variables can't be used inside a function (see comments for DECLARE_PER_CPU_SECTION).
@@ -102,7 +102,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
.ct = CODE_TAG_INIT, \
.counters = &_shared_alloc_tag };
-#else /* ARCH_NEEDS_WEAK_PER_CPU */
+#else /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
#ifdef MODULE
@@ -123,7 +123,7 @@ DECLARE_PER_CPU(struct alloc_tag_counters, _shared_alloc_tag);
#endif /* MODULE */
-#endif /* ARCH_NEEDS_WEAK_PER_CPU */
+#endif /* CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU && MODULE */
DECLARE_STATIC_KEY_MAYBE(CONFIG_MEM_ALLOC_PROFILING_ENABLED_BY_DEFAULT,
mem_alloc_profiling_key);
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 062fbd4c9b77..8cced632ecd0 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -12,20 +12,6 @@
struct amd_iommu;
-/*
- * This is mainly used to communicate information back-and-forth
- * between SVM and IOMMU for setting up and tearing down posted
- * interrupt
- */
-struct amd_iommu_pi_data {
- u32 ga_tag;
- u32 prev_ga_tag;
- u64 base;
- bool is_guest_mode;
- struct vcpu_data *vcpu_data;
- void *ir_data;
-};
-
#ifdef CONFIG_AMD_IOMMU
struct task_struct;
@@ -44,10 +30,8 @@ static inline void amd_iommu_detect(void) { }
/* IOMMU AVIC Function */
extern int amd_iommu_register_ga_log_notifier(int (*notifier)(u32));
-extern int
-amd_iommu_update_ga(int cpu, bool is_run, void *data);
-
-extern int amd_iommu_activate_guest_mode(void *data);
+extern int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr);
+extern int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr);
extern int amd_iommu_deactivate_guest_mode(void *data);
#else /* defined(CONFIG_AMD_IOMMU) && defined(CONFIG_IRQ_REMAP) */
@@ -58,13 +42,12 @@ amd_iommu_register_ga_log_notifier(int (*notifier)(u32))
return 0;
}
-static inline int
-amd_iommu_update_ga(int cpu, bool is_run, void *data)
+static inline int amd_iommu_update_ga(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
-static inline int amd_iommu_activate_guest_mode(void *data)
+static inline int amd_iommu_activate_guest_mode(void *data, int cpu, bool ga_log_intr)
{
return 0;
}
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 5ca2d5699620..7cfe48769239 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -4,12 +4,13 @@
*
* Common interface definitions for making balloon pages movable by compaction.
*
- * Balloon page migration makes use of the general non-lru movable page
+ * Balloon page migration makes use of the general "movable_ops page migration"
* feature.
*
* page->private is used to reference the responsible balloon device.
- * page->mapping is used in context of non-lru page migration to reference
- * the address space operations for page isolation/migration/compaction.
+ * Whether a page has movable_ops, and which movable_ops apply, is
+ * derived from the page type (PageOffline()) combined with the
+ * PG_movable_ops flag (PageMovableOps()).
*
* As the page isolation scanning step a compaction thread does is a lockless
* procedure (from a page standpoint), it might bring some racy situations while
@@ -17,12 +18,10 @@
* and safely perform balloon's page compaction and migration we must, always,
* ensure following these simple rules:
*
- * i. when updating a balloon's page ->mapping element, strictly do it under
- * the following lock order, independently of the far superior
- * locking scheme (lru_lock, balloon_lock):
+ * i. Setting the PG_movable_ops flag and page->private is done under the
+ * following lock order:
* +-page_lock(page);
* +--spin_lock_irq(&b_dev_info->pages_lock);
- * ... page->mapping updates here ...
*
* ii. isolation or dequeueing procedure must remove the page from balloon
* device page list under b_dev_info->pages_lock.
@@ -78,6 +77,15 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
#ifdef CONFIG_BALLOON_COMPACTION
extern const struct movable_operations balloon_mops;
+/*
+ * balloon_page_device - get the b_dev_info descriptor for the balloon device
+ * that enqueues the given page.
+ */
+static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+{
+ return (struct balloon_dev_info *)page_private(page);
+}
+#endif /* CONFIG_BALLOON_COMPACTION */
/*
* balloon_page_insert - insert a page into the balloon's page list and make
@@ -92,68 +100,34 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageOffline(page);
- __SetPageMovable(page, &balloon_mops);
- set_page_private(page, (unsigned long)balloon);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION)) {
+ SetPageMovableOps(page);
+ set_page_private(page, (unsigned long)balloon);
+ }
list_add(&page->lru, &balloon->pages);
}
-/*
- * balloon_page_delete - delete a page from balloon's page list and clear
- * the page->private assignement accordingly.
- * @page : page to be released from balloon's page list
- *
- * Caller must ensure the page is locked and the spin_lock protecting balloon
- * pages list is held before deleting a page from the balloon device.
- */
-static inline void balloon_page_delete(struct page *page)
+static inline gfp_t balloon_mapping_gfp_mask(void)
{
- __ClearPageOffline(page);
- __ClearPageMovable(page);
- set_page_private(page, 0);
- /*
- * No touch page.lru field once @page has been isolated
- * because VM is using the field.
- */
- if (!PageIsolated(page))
- list_del(&page->lru);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ return GFP_HIGHUSER_MOVABLE;
+ return GFP_HIGHUSER;
}
/*
- * balloon_page_device - get the b_dev_info descriptor for the balloon device
- * that enqueues the given page.
+ * balloon_page_finalize - prepare a balloon page that was removed from the
+ * balloon list for release to the page allocator
+ * @page: page to be released to the page allocator
+ *
+ * Caller must ensure that the page is locked.
*/
-static inline struct balloon_dev_info *balloon_page_device(struct page *page)
+static inline void balloon_page_finalize(struct page *page)
{
- return (struct balloon_dev_info *)page_private(page);
+ if (IS_ENABLED(CONFIG_BALLOON_COMPACTION))
+ set_page_private(page, 0);
+ /* PageOffline is sticky until the page is freed to the buddy. */
}
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER_MOVABLE;
-}
-
-#else /* !CONFIG_BALLOON_COMPACTION */
-
-static inline void balloon_page_insert(struct balloon_dev_info *balloon,
- struct page *page)
-{
- __SetPageOffline(page);
- list_add(&page->lru, &balloon->pages);
-}
-
-static inline void balloon_page_delete(struct page *page)
-{
- __ClearPageOffline(page);
- list_del(&page->lru);
-}
-
-static inline gfp_t balloon_mapping_gfp_mask(void)
-{
- return GFP_HIGHUSER;
-}
-
-#endif /* CONFIG_BALLOON_COMPACTION */
-
/*
* balloon_page_push - insert a page into a page list.
* @head : pointer to list
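As a sketch of the locking rules above, a hypothetical enqueue step inserts a page only while holding the device's pages_lock, the same pattern the balloon core's balloon_page_enqueue() follows; my_balloon_add_page() is an assumed name:

    /* Insert one freshly allocated page under the device's page-list lock. */
    static void my_balloon_add_page(struct balloon_dev_info *b_dev_info,
    				struct page *page)
    {
    	unsigned long flags;

    	/* page was allocated with balloon_mapping_gfp_mask() */
    	spin_lock_irqsave(&b_dev_info->pages_lock, flags);
    	balloon_page_insert(b_dev_info, page);
    	spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
    }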
diff --git a/include/linux/bcm47xx_nvram.h b/include/linux/bcm47xx_nvram.h
index 7615f8d7b1ed..e4b6ce953ddb 100644
--- a/include/linux/bcm47xx_nvram.h
+++ b/include/linux/bcm47xx_nvram.h
@@ -7,7 +7,6 @@
#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_BCM47XX_NVRAM
diff --git a/include/linux/bcm47xx_sprom.h b/include/linux/bcm47xx_sprom.h
index f8254fd53e15..40a7da3ef50e 100644
--- a/include/linux/bcm47xx_sprom.h
+++ b/include/linux/bcm47xx_sprom.h
@@ -5,8 +5,8 @@
#ifndef __BCM47XX_SPROM_H
#define __BCM47XX_SPROM_H
+#include <linux/errno.h>
#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/vmalloc.h>
struct ssb_sprom;
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index 6d9a53db54b6..5355f8f806a9 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -189,14 +189,14 @@ static __always_inline u64 field_mask(u64 field)
}
#define field_max(field) ((typeof(field))field_mask(field))
#define ____MAKE_OP(type,base,to,from) \
-static __always_inline __##type type##_encode_bits(base v, base field) \
+static __always_inline __##type __must_check type##_encode_bits(base v, base field) \
{ \
if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
__field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
-static __always_inline __##type type##_replace_bits(__##type old, \
- base val, base field) \
+static __always_inline __##type __must_check type##_replace_bits(__##type old, \
+ base val, base field) \
{ \
return (old & ~to(field)) | type##_encode_bits(val, field); \
} \
@@ -205,7 +205,7 @@ static __always_inline void type##p_replace_bits(__##type *p, \
{ \
*p = (*p & ~to(field)) | type##_encode_bits(val, field); \
} \
-static __always_inline base type##_get_bits(__##type v, base field) \
+static __always_inline base __must_check type##_get_bits(__##type v, base field) \
{ \
return (from(v) & field)/field_multiplier(field); \
}
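A small usage sketch of the now-__must_check helpers; the MY_SPEED_FIELD layout is invented for illustration:

    #include <linux/bitfield.h>

    #define MY_SPEED_FIELD	GENMASK(7, 4)	/* hypothetical register field */

    static u32 my_set_speed(u32 reg, u32 speed)
    {
    	/* the result must be consumed now, or the compiler warns */
    	return u32_replace_bits(reg, speed, MY_SPEED_FIELD);
    }

    static u32 my_get_speed(u32 reg)
    {
    	return u32_get_bits(reg, MY_SPEED_FIELD);
    }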
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 7ad056219115..a40cc861b3a7 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -2,10 +2,8 @@
#ifndef __LINUX_BITS_H
#define __LINUX_BITS_H
-#include <linux/const.h>
#include <vdso/bits.h>
#include <uapi/linux/bits.h>
-#include <asm/bitsperlong.h>
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
@@ -50,10 +48,14 @@
(type_max(t) << (l) & \
type_max(t) >> (BITS_PER_TYPE(t) - 1 - (h)))))
+#define GENMASK(h, l) GENMASK_TYPE(unsigned long, h, l)
+#define GENMASK_ULL(h, l) GENMASK_TYPE(unsigned long long, h, l)
+
#define GENMASK_U8(h, l) GENMASK_TYPE(u8, h, l)
#define GENMASK_U16(h, l) GENMASK_TYPE(u16, h, l)
#define GENMASK_U32(h, l) GENMASK_TYPE(u32, h, l)
#define GENMASK_U64(h, l) GENMASK_TYPE(u64, h, l)
+#define GENMASK_U128(h, l) GENMASK_TYPE(u128, h, l)
/*
* Fixed-type variants of BIT(), with additional checks like GENMASK_TYPE(). The
@@ -79,28 +81,9 @@
* BUILD_BUG_ON_ZERO is not available in h files included from asm files,
* disable the input check if that is the case.
*/
-#define GENMASK_INPUT_CHECK(h, l) 0
+#define GENMASK(h, l) __GENMASK(h, l)
+#define GENMASK_ULL(h, l) __GENMASK_ULL(h, l)
#endif /* !defined(__ASSEMBLY__) */
-#define GENMASK(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK(h, l))
-#define GENMASK_ULL(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_ULL(h, l))
-
-#if !defined(__ASSEMBLY__)
-/*
- * Missing asm support
- *
- * __GENMASK_U128() depends on _BIT128() which would not work
- * in the asm code, as it shifts an 'unsigned __int128' data
- * type instead of direct representation of 128 bit constants
- * such as long and unsigned long. The fundamental problem is
- * that a 128 bit constant will get silently truncated by the
- * gcc compiler.
- */
-#define GENMASK_U128(h, l) \
- (GENMASK_INPUT_CHECK(h, l) + __GENMASK_U128(h, l))
-#endif
-
#endif /* __LINUX_BITS_H */
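For reference, assumed example values for the reworked macros; with GENMASK_TYPE() a reversed (h, l) pair now fails the build in C code instead of truncating silently:

    #include <linux/bits.h>

    #define MY_BYTE_FIELD	GENMASK_U8(6, 3)	/* 0x78 */
    #define MY_QWORD_FIELD	GENMASK_ULL(63, 32)	/* 0xffffffff00000000ULL */
    /* GENMASK_U128() additionally assumes the architecture provides u128 */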
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index e61687d5e496..6b93a64115fe 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -375,15 +375,12 @@ struct css_rstat_cpu {
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next. updated_children is terminated by its container css.
- *
- * In addition to being more compact, singly-linked list pointing to
- * the css makes it unnecessary for each per-cpu struct to point back
- * to the associated css.
- *
- * Protected by per-cpu css->ss->rstat_ss_cpu_lock.
*/
struct cgroup_subsys_state *updated_children;
struct cgroup_subsys_state *updated_next; /* NULL if not on the list */
+
+ struct llist_node lnode; /* lockless list for update */
+ struct cgroup_subsys_state *owner; /* back pointer */
};
/*
@@ -821,7 +818,7 @@ struct cgroup_subsys {
unsigned int depends_on;
spinlock_t rstat_ss_lock;
- raw_spinlock_t __percpu *rstat_ss_cpu_lock;
+ struct llist_head __percpu *lhead; /* lockless update list head */
};
extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
@@ -898,14 +895,12 @@ static inline u16 sock_cgroup_prioidx(const struct sock_cgroup_data *skcd)
#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline u32 sock_cgroup_classid(const struct sock_cgroup_data *skcd)
{
-#ifdef CONFIG_CGROUP_NET_CLASSID
return READ_ONCE(skcd->classid);
-#else
- return 0;
-#endif
}
+#endif
static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
u16 prioidx)
@@ -915,13 +910,13 @@ static inline void sock_cgroup_set_prioidx(struct sock_cgroup_data *skcd,
#endif
}
+#ifdef CONFIG_CGROUP_NET_CLASSID
static inline void sock_cgroup_set_classid(struct sock_cgroup_data *skcd,
u32 classid)
{
-#ifdef CONFIG_CGROUP_NET_CLASSID
WRITE_ONCE(skcd->classid, classid);
-#endif
}
+#endif
#else /* CONFIG_SOCK_CGROUP_DATA */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2e6e603b7493..630705a47129 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1360,6 +1360,32 @@ void clk_hw_unregister(struct clk_hw *hw);
/* helper functions */
const char *__clk_get_name(const struct clk *clk);
const char *clk_hw_get_name(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_dev() - get device from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device from
+ *
+ * This is a helper to get the struct device associated with a hardware
+ * clock. Some clock controllers, such as those registered with
+ * CLK_OF_DECLARE(), may not have provided a device pointer when
+ * registering the clock.
+ *
+ * Return: the struct device associated with the clock, or NULL if there
+ * is none.
+ */
+struct device *clk_hw_get_dev(const struct clk_hw *hw);
+
+/**
+ * clk_hw_get_of_node() - get device_node from a hardware clock.
+ * @hw: the clk_hw pointer to get the struct device_node from
+ *
+ * This is a helper to get the struct device_node associated with a
+ * hardware clock.
+ *
+ * Return: the struct device_node associated with the clock, or NULL
+ * if there is none.
+ */
+struct device_node *clk_hw_get_of_node(const struct clk_hw *hw);
#ifdef CONFIG_COMMON_CLK
struct clk_hw *__clk_get_hw(struct clk *clk);
#else
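A minimal sketch of the two new helpers from inside a hypothetical clk_ops callback (my_clk_prepare() is an assumption, not from this patch):

    static int my_clk_prepare(struct clk_hw *hw)
    {
    	struct device *dev = clk_hw_get_dev(hw);
    	struct device_node *np = clk_hw_get_of_node(hw);

    	/* dev may be NULL, e.g. for clocks registered via CLK_OF_DECLARE() */
    	if (dev)
    		dev_dbg(dev, "preparing %s (np=%pOF)\n",
    			clk_hw_get_name(hw), np);

    	return 0;
    }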
diff --git a/include/linux/codetag.h b/include/linux/codetag.h
index 5f2b9a1f722c..457ed8fd3214 100644
--- a/include/linux/codetag.h
+++ b/include/linux/codetag.h
@@ -54,6 +54,7 @@ struct codetag_iterator {
struct codetag_module *cmod;
unsigned long mod_id;
struct codetag *ct;
+ unsigned long mod_seq;
};
#ifdef MODULE
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index 96e8a66da133..68861da4cf7c 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -10,7 +10,7 @@
#ifdef CONFIG_COREDUMP
struct core_vma_metadata {
unsigned long start, end;
- unsigned long flags;
+ vm_flags_t flags;
unsigned long dump_size;
unsigned long pgoff;
struct file *file;
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 7ae80a7ca81e..ff8f41ab7ce6 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -355,6 +355,18 @@ unsigned int cpumask_next_wrap(int n, const struct cpumask *src)
}
/**
+ * cpumask_random - get random cpu in *src.
+ * @src: cpumask pointer
+ *
+ * Return: random set bit, or >= nr_cpu_ids if @src is empty.
+ */
+static __always_inline
+unsigned int cpumask_random(const struct cpumask *src)
+{
+ return find_random_bit(cpumask_bits(src), nr_cpu_ids);
+}
+
+/**
* for_each_cpu - iterate over every cpu in a mask
* @cpu: the (optionally unsigned) integer iterator
* @mask: the cpumask pointer
@@ -547,22 +559,6 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
}
/**
- * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
- * @srcp1: the cpumask pointer
- * @srcp2: the cpumask pointer
- * @cpu: the Nth cpu to find, starting from 0
- *
- * Return: >= nr_cpu_ids if such cpu doesn't exist.
- */
-static __always_inline
-unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
- const struct cpumask *srcp2)
-{
- return find_nth_andnot_bit(cpumask_bits(srcp1), cpumask_bits(srcp2),
- small_cpumask_bits, cpumask_check(cpu));
-}
-
-/**
* cpumask_nth_and_andnot - get the Nth cpu set in 1st and 2nd cpumask, and clear in 3rd.
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
@@ -609,6 +605,18 @@ void __cpumask_set_cpu(unsigned int cpu, struct cpumask *dstp)
__set_bit(cpumask_check(cpu), cpumask_bits(dstp));
}
+/**
+ * cpumask_clear_cpus - clear cpus in a cpumask
+ * @dstp: the cpumask pointer
+ * @cpu: cpu number (< nr_cpu_ids)
+ * @ncpus: number of cpus to clear (< nr_cpu_ids)
+ */
+static __always_inline void cpumask_clear_cpus(struct cpumask *dstp,
+ unsigned int cpu, unsigned int ncpus)
+{
+ cpumask_check(cpu + ncpus - 1);
+ bitmap_clear(cpumask_bits(dstp), cpumask_check(cpu), ncpus);
+}
/**
* cpumask_clear_cpu - clear a cpu in a cpumask
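Usage sketch for the two additions; the candidate mask and the my_*() wrappers are assumptions:

    /* Pick a random candidate, falling back to the first online CPU. */
    static unsigned int my_pick_cpu(const struct cpumask *candidates)
    {
    	unsigned int cpu = cpumask_random(candidates);

    	return cpu < nr_cpu_ids ? cpu : cpumask_first(cpu_online_mask);
    }

    static void my_clear_quad(struct cpumask *mask)
    {
    	/* clear CPUs 4..7 with one bitmap_clear() instead of four set ops */
    	cpumask_clear_cpus(mask, 4, 4);
    }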
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index b50f1954d1bb..a2137e19be7d 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -136,6 +136,9 @@
/* Set if the algorithm supports virtual addresses. */
#define CRYPTO_ALG_REQ_VIRT 0x00040000
+/* Set if the algorithm cannot have a fallback (e.g., phmac). */
+#define CRYPTO_ALG_NO_FALLBACK 0x00080000
+
/* The high bits 0xff000000 are reserved for type-specific flags. */
/*
diff --git a/include/linux/damon.h b/include/linux/damon.h
index a4011726cb3b..f13664c62ddd 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -448,12 +448,29 @@ struct damos_access_pattern {
};
/**
+ * struct damos_migrate_dests - Migration destination nodes and their weights.
+ * @node_id_arr: Array of migration destination node ids.
+ * @weight_arr: Array of migration weights for @node_id_arr.
+ * @nr_dests: Length of the @node_id_arr and @weight_arr arrays.
+ *
+ * @node_id_arr is an array of the ids of migration destination nodes.
+ * @weight_arr is an array of the weights for those nodes. Each weight in
+ * @weight_arr applies to the node in @node_id_arr at the same array index.
+ */
+struct damos_migrate_dests {
+ unsigned int *node_id_arr;
+ unsigned int *weight_arr;
+ size_t nr_dests;
+};
+
+/**
* struct damos - Represents a Data Access Monitoring-based Operation Scheme.
* @pattern: Access pattern of target regions.
- * @action: &damo_action to be applied to the target regions.
+ * @action: &damos_action to be applied to the target regions.
* @apply_interval_us: The time between applying the @action.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
+ * @migrate_dests: Destination nodes if @action is "migrate_{hot,cold}".
* @target_nid: Destination node if @action is "migrate_{hot,cold}".
* @filters: Additional set of &struct damos_filter for &action.
* @ops_filters: ops layer handling &struct damos_filter objects list.
@@ -472,9 +489,12 @@ struct damos_access_pattern {
* monitoring context are inactive, DAMON stops monitoring either, and just
* repeatedly checks the watermarks.
*
+ * @migrate_dests specifies multiple migration target nodes with different
+ * weights for migrate_hot or migrate_cold actions. @target_nid is ignored if
+ * this is set.
+ *
* @target_nid is used to set the migration target node for migrate_hot or
- * migrate_cold actions, which means it's only meaningful when @action is either
- * "migrate_hot" or "migrate_cold".
+ * migrate_cold actions when @migrate_dests is unset.
*
* Before applying the &action to a memory region, &struct damon_operations
* implementation could check pages of the region and skip &action to respect
@@ -517,7 +537,10 @@ struct damos {
struct damos_quota quota;
struct damos_watermarks wmarks;
union {
- int target_nid;
+ struct {
+ int target_nid;
+ struct damos_migrate_dests migrate_dests;
+ };
};
struct list_head filters;
struct list_head ops_filters;
@@ -553,6 +576,7 @@ enum damon_ops_id {
* @get_scheme_score: Get the score of a region for a scheme.
* @apply_scheme: Apply a DAMON-based operation scheme.
* @target_valid: Determine if the target is valid.
+ * @cleanup_target: Clean up each target before deallocation.
* @cleanup: Clean up the context.
*
* DAMON can be extended for various address spaces and usages. For this,
@@ -585,6 +609,7 @@ enum damon_ops_id {
* filters (&struct damos_filter) that handled by itself.
* @target_valid should check whether the target is still valid for the
* monitoring.
+ * @cleanup_target is called before the target is deallocated.
* @cleanup is called from @kdamond just before its termination.
*/
struct damon_operations {
@@ -600,42 +625,16 @@ struct damon_operations {
struct damon_target *t, struct damon_region *r,
struct damos *scheme, unsigned long *sz_filter_passed);
bool (*target_valid)(struct damon_target *t);
+ void (*cleanup_target)(struct damon_target *t);
void (*cleanup)(struct damon_ctx *context);
};
-/**
- * struct damon_callback - Monitoring events notification callbacks.
- *
- * @after_wmarks_check: Called after each schemes' watermarks check.
- * @after_aggregation: Called after each aggregation.
- * @before_terminate: Called before terminating the monitoring.
- *
- * The monitoring thread (&damon_ctx.kdamond) calls @before_terminate just
- * before finishing the monitoring.
- *
- * The monitoring thread calls @after_wmarks_check after each DAMON-based
- * operation schemes' watermarks check. If users need to make changes to the
- * attributes of the monitoring context while it's deactivated due to the
- * watermarks, this is the good place to do.
- *
- * The monitoring thread calls @after_aggregation for each of the aggregation
- * intervals. Therefore, users can safely access the monitoring results
- * without additional protection. For the reason, users are recommended to use
- * these callback for the accesses to the results.
- *
- * If any callback returns non-zero, monitoring stops.
- */
-struct damon_callback {
- int (*after_wmarks_check)(struct damon_ctx *context);
- int (*after_aggregation)(struct damon_ctx *context);
- void (*before_terminate)(struct damon_ctx *context);
-};
-
/*
* struct damon_call_control - Control damon_call().
*
* @fn: Function to be called back.
* @data: Data that will be passed to @fn.
+ * @repeat: Repeat invocations.
* @return_code: Return code from @fn invocation.
*
* Control damon_call(), which requests specific kdamond to invoke a given
@@ -644,19 +643,22 @@ struct damon_callback {
struct damon_call_control {
int (*fn)(void *data);
void *data;
+ bool repeat;
int return_code;
/* private: internal use only */
/* informs if the kdamond finished handling of the request */
struct completion completion;
	/* informs if the kdamond canceled @fn invocation */
bool canceled;
+ /* List head for siblings. */
+ struct list_head list;
};
/**
* struct damon_intervals_goal - Monitoring intervals auto-tuning goal.
*
* @access_bp: Access events observation ratio to achieve in bp.
- * @aggrs: Number of aggregations to acheive @access_bp within.
+ * @aggrs: Number of aggregations to achieve @access_bp within.
* @min_sample_us: Minimum resulting sampling interval in microseconds.
* @max_sample_us: Maximum resulting sampling interval in microseconds.
*
@@ -697,7 +699,7 @@ struct damon_intervals_goal {
* ``mmap()`` calls from the application, in case of virtual memory monitoring)
* and applies the changes for each @ops_update_interval. All time intervals
* are in micro-seconds. Please refer to &struct damon_operations and &struct
- * damon_callback for more detail.
+ * damon_call_control for more detail.
*/
struct damon_attrs {
unsigned long sample_interval;
@@ -744,7 +746,6 @@ struct damon_attrs {
* Accesses to other fields must be protected by themselves.
*
* @ops: Set of monitoring operations for given use cases.
- * @callback: Set of callbacks for monitoring events notifications.
*
* @adaptive_targets: Head of monitoring targets (&damon_target) list.
* @schemes: Head of schemes (&damos) list.
@@ -775,8 +776,9 @@ struct damon_ctx {
/* for scheme quotas prioritization */
unsigned long *regions_score_histogram;
- struct damon_call_control *call_control;
- struct mutex call_control_lock;
+ /* lists of &struct damon_call_control */
+ struct list_head call_controls;
+ struct mutex call_controls_lock;
struct damos_walk_control *walk_control;
struct mutex walk_control_lock;
@@ -786,7 +788,6 @@ struct damon_ctx {
struct mutex kdamond_lock;
struct damon_operations ops;
- struct damon_callback callback;
struct list_head adaptive_targets;
struct list_head schemes;
@@ -905,7 +906,7 @@ struct damon_target *damon_new_target(void);
void damon_add_target(struct damon_ctx *ctx, struct damon_target *t);
bool damon_targets_empty(struct damon_ctx *ctx);
void damon_free_target(struct damon_target *t);
-void damon_destroy_target(struct damon_target *t);
+void damon_destroy_target(struct damon_target *t, struct damon_ctx *ctx);
unsigned int damon_nr_regions(struct damon_target *t);
struct damon_ctx *damon_new_ctx(void);
@@ -934,6 +935,7 @@ static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
+bool damon_is_running(struct damon_ctx *ctx);
int damon_call(struct damon_ctx *ctx, struct damon_call_control *control);
int damos_walk(struct damon_ctx *ctx, struct damos_walk_control *control);
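A hedged sketch of populating the new migrate_dests member; the node ids, weights, and my_setup_dests() are invented for illustration:

    static unsigned int my_nodes[] = { 1, 2 };
    static unsigned int my_weights[] = { 7, 3 };	/* 70%/30% split */

    static void my_setup_dests(struct damos *scheme)
    {
    	scheme->migrate_dests = (struct damos_migrate_dests){
    		.node_id_arr = my_nodes,
    		.weight_arr = my_weights,
    		.nr_dests = ARRAY_SIZE(my_nodes),
    	};
    	/* @target_nid is ignored once migrate_dests is set */
    }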
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 78891518291d..9d624f4d9df6 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -26,7 +26,7 @@ struct dax_operations {
* number of pages available for DAX at that pfn.
*/
long (*direct_access)(struct dax_device *, pgoff_t, long,
- enum dax_access_mode, void **, pfn_t *);
+ enum dax_access_mode, void **, unsigned long *);
/* zero_page_range: required operation. Zero page range */
int (*zero_page_range)(struct dax_device *, pgoff_t, size_t);
/*
@@ -243,7 +243,7 @@ static inline void dax_break_layout_final(struct inode *inode)
bool dax_alive(struct dax_device *dax_dev);
void *dax_get_private(struct dax_device *dax_dev);
long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
- enum dax_access_mode mode, void **kaddr, pfn_t *pfn);
+ enum dax_access_mode mode, void **kaddr, unsigned long *pfn);
size_t dax_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
size_t bytes, struct iov_iter *i);
size_t dax_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff, void *addr,
@@ -257,9 +257,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
- pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
+ unsigned long *pfnp, int *errp,
+ const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- unsigned int order, pfn_t pfn);
+ unsigned int order, unsigned long pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
void dax_delete_mapping_range(struct address_space *mapping,
loff_t start, loff_t end);
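Caller-side sketch of the pfn_t removal; my_dax_map() is an assumed wrapper showing the new unsigned long out-parameter:

    static long my_dax_map(struct dax_device *dax_dev, pgoff_t pgoff,
    		       long nr_pages, void **kaddr)
    {
    	unsigned long pfn;

    	return dax_direct_access(dax_dev, pgoff, nr_pages, DAX_ACCESS,
    				 kaddr, &pfn);
    }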
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index cb95951547ab..84fdc3a6a19a 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -156,7 +156,7 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
*/
typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
long nr_pages, enum dax_access_mode node, void **kaddr,
- pfn_t *pfn);
+ unsigned long *pfn);
typedef int (*dm_dax_zero_page_range_fn)(struct dm_target *ti, pgoff_t pgoff,
size_t nr_pages);
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index b12776883d14..64639e104110 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -26,6 +26,7 @@
struct dma_fence;
struct dma_fence_ops;
struct dma_fence_cb;
+struct seq_file;
/**
* struct dma_fence - software synchronization primitive
@@ -97,6 +98,7 @@ struct dma_fence {
};
enum dma_fence_flag_bits {
+ DMA_FENCE_FLAG_SEQNO64_BIT,
DMA_FENCE_FLAG_SIGNALED_BIT,
DMA_FENCE_FLAG_TIMESTAMP_BIT,
DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
@@ -125,14 +127,6 @@ struct dma_fence_cb {
*/
struct dma_fence_ops {
/**
- * @use_64bit_seqno:
- *
- * True if this dma_fence implementation uses 64bit seqno, false
- * otherwise.
- */
- bool use_64bit_seqno;
-
- /**
* @get_driver_name:
*
* Returns the driver name. This is a callback to allow drivers to
@@ -262,6 +256,9 @@ struct dma_fence_ops {
void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
spinlock_t *lock, u64 context, u64 seqno);
+void dma_fence_init64(struct dma_fence *fence, const struct dma_fence_ops *ops,
+ spinlock_t *lock, u64 context, u64 seqno);
+
void dma_fence_release(struct kref *kref);
void dma_fence_free(struct dma_fence *fence);
void dma_fence_describe(struct dma_fence *fence, struct seq_file *seq);
@@ -382,6 +379,29 @@ bool dma_fence_remove_callback(struct dma_fence *fence,
void dma_fence_enable_sw_signaling(struct dma_fence *fence);
/**
+ * DOC: Safe external access to driver provided object members
+ *
+ * All data not stored directly in the dma-fence object, such as the
+ * &dma_fence.lock and memory potentially accessed by functions in the
+ * &dma_fence.ops table, MUST NOT be accessed after the fence has been signalled
+ * because after that point drivers are allowed to free it.
+ *
+ * All code accessing that data via the dma-fence API (or directly, which is
+ * discouraged), MUST make sure to contain the complete access within a
+ * &rcu_read_lock and &rcu_read_unlock pair.
+ *
+ * Some dma-fence APIs handle this automatically, while others, for example
+ * &dma_fence_driver_name and &dma_fence_timeline_name, leave that
+ * responsibility to the caller.
+ *
+ * To enable this scheme to work, drivers MUST ensure an RCU grace period
+ * elapses between signalling the fence and freeing that data.
+ *
+ */
+const char __rcu *dma_fence_driver_name(struct dma_fence *fence);
+const char __rcu *dma_fence_timeline_name(struct dma_fence *fence);
+
+/**
* dma_fence_is_signaled_locked - Return an indication if the fence
* is signaled yet.
* @fence: the fence to check
@@ -441,21 +461,20 @@ dma_fence_is_signaled(struct dma_fence *fence)
/**
* __dma_fence_is_later - return if f1 is chronologically later than f2
+ * @fence: fence in whose context to do the comparison
* @f1: the first fence's seqno
* @f2: the second fence's seqno from the same context
- * @ops: dma_fence_ops associated with the seqno
*
* Returns true if f1 is chronologically later than f2. Both fences must be
* from the same context, since a seqno is not common across contexts.
*/
-static inline bool __dma_fence_is_later(u64 f1, u64 f2,
- const struct dma_fence_ops *ops)
+static inline bool __dma_fence_is_later(struct dma_fence *fence, u64 f1, u64 f2)
{
/* This is for backward compatibility with drivers which can only handle
* 32bit sequence numbers. Use a 64bit compare when the driver says to
* do so.
*/
- if (ops->use_64bit_seqno)
+ if (test_bit(DMA_FENCE_FLAG_SEQNO64_BIT, &fence->flags))
return f1 > f2;
return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0;
@@ -475,7 +494,7 @@ static inline bool dma_fence_is_later(struct dma_fence *f1,
if (WARN_ON(f1->context != f2->context))
return false;
- return __dma_fence_is_later(f1->seqno, f2->seqno, f1->ops);
+ return __dma_fence_is_later(f1, f1->seqno, f2->seqno);
}
/**
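Migration sketch implied by this change: a driver with native 64-bit seqnos drops the removed ->use_64bit_seqno flag and initializes with dma_fence_init64(), which sets DMA_FENCE_FLAG_SEQNO64_BIT so later-than comparisons use the full 64 bits. The timeline struct, ops, and field names below are assumptions:

    static void my_fence_start(struct my_timeline *tl, struct dma_fence *f)
    {
    	/* before: my_fence_ops.use_64bit_seqno = true; dma_fence_init(...); */
    	dma_fence_init64(f, &my_fence_ops, &tl->lock,
    			 tl->context, ++tl->seqno);
    }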
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 06c4de602b2f..7d40b51933d1 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -60,6 +60,14 @@ static inline struct dma_pool *dma_pool_create(const char *name,
NUMA_NO_NODE);
}
+/**
+ * dma_pool_zalloc - Get a zero-initialized block of DMA coherent memory.
+ * @pool: dma pool that will produce the block
+ * @mem_flags: GFP_* bitmask
+ * @handle: pointer to dma address of block
+ *
+ * Same as dma_pool_alloc(), but the returned memory is zeroed.
+ */
static inline void *dma_pool_zalloc(struct dma_pool *pool, gfp_t mem_flags,
dma_addr_t *handle)
{
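Usage sketch for the newly documented helper; pool creation and the my_use_block() wrapper are elided assumptions:

    static int my_use_block(struct dma_pool *pool)
    {
    	dma_addr_t dma;
    	void *vaddr = dma_pool_zalloc(pool, GFP_KERNEL, &dma);

    	if (!vaddr)
    		return -ENOMEM;
    	/* vaddr points at a zeroed, DMA-coherent block */
    	dma_pool_free(pool, vaddr, dma);
    	return 0;
    }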
diff --git a/include/linux/find.h b/include/linux/find.h
index 5a2c267ea7f9..9d720ad92bc1 100644
--- a/include/linux/find.h
+++ b/include/linux/find.h
@@ -44,6 +44,8 @@ unsigned long _find_next_bit_le(const unsigned long *addr, unsigned
long size, unsigned long offset);
#endif
+unsigned long find_random_bit(const unsigned long *addr, unsigned long size);
+
#ifndef find_next_bit
/**
* find_next_bit - find the next set bit in a memory region
@@ -268,33 +270,6 @@ unsigned long find_nth_and_bit(const unsigned long *addr1, const unsigned long *
}
/**
- * find_nth_andnot_bit - find N'th set bit in 2 memory regions,
- * flipping bits in 2nd region
- * @addr1: The 1st address to start the search at
- * @addr2: The 2nd address to start the search at
- * @size: The maximum number of bits to search
- * @n: The number of set bit, which position is needed, counting from 0
- *
- * Returns the bit number of the N'th set bit.
- * If no such, returns @size.
- */
-static __always_inline
-unsigned long find_nth_andnot_bit(const unsigned long *addr1, const unsigned long *addr2,
- unsigned long size, unsigned long n)
-{
- if (n >= size)
- return size;
-
- if (small_const_nbits(size)) {
- unsigned long val = *addr1 & (~*addr2) & GENMASK(size - 1, 0);
-
- return val ? fns(val, n) : size;
- }
-
- return __find_nth_andnot_bit(addr1, addr2, size, n);
-}
-
-/**
* find_nth_and_andnot_bit - find N'th set bit in 2 memory regions,
* excluding those set in 3rd region
* @addr1: The 1st address to start the search at
diff --git a/include/linux/fpga/adi-axi-common.h b/include/linux/fpga/adi-axi-common.h
deleted file mode 100644
index 141ac3f251e6..000000000000
--- a/include/linux/fpga/adi-axi-common.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Analog Devices AXI common registers & definitions
- *
- * Copyright 2019 Analog Devices Inc.
- *
- * https://wiki.analog.com/resources/fpga/docs/axi_ip
- * https://wiki.analog.com/resources/fpga/docs/hdl/regmap
- */
-
-#ifndef ADI_AXI_COMMON_H_
-#define ADI_AXI_COMMON_H_
-
-#define ADI_AXI_REG_VERSION 0x0000
-
-#define ADI_AXI_PCORE_VER(major, minor, patch) \
- (((major) << 16) | ((minor) << 8) | (patch))
-
-#define ADI_AXI_PCORE_VER_MAJOR(version) (((version) >> 16) & 0xff)
-#define ADI_AXI_PCORE_VER_MINOR(version) (((version) >> 8) & 0xff)
-#define ADI_AXI_PCORE_VER_PATCH(version) ((version) & 0xff)
-
-#endif /* ADI_AXI_COMMON_H_ */
diff --git a/include/linux/fprobe.h b/include/linux/fprobe.h
index 702099f08929..7964db96e41a 100644
--- a/include/linux/fprobe.h
+++ b/include/linux/fprobe.h
@@ -94,6 +94,7 @@ int register_fprobe_ips(struct fprobe *fp, unsigned long *addrs, int num);
int register_fprobe_syms(struct fprobe *fp, const char **syms, int num);
int unregister_fprobe(struct fprobe *fp);
bool fprobe_is_registered(struct fprobe *fp);
+int fprobe_count_ips_from_filter(const char *filter, const char *notfilter);
#else
static inline int register_fprobe(struct fprobe *fp, const char *filter, const char *notfilter)
{
@@ -115,6 +116,10 @@ static inline bool fprobe_is_registered(struct fprobe *fp)
{
return false;
}
+static inline int fprobe_count_ips_from_filter(const char *filter, const char *notfilter)
+{
+ return -EOPNOTSUPP;
+}
#endif
/**
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 2ec4807d4ea8..d7ab4f96d705 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -200,12 +200,12 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/*
* The two FMODE_NONOTIFY* define which fsnotify events should not be generated
- * for a file. These are the possible values of (f->f_mode &
- * FMODE_FSNOTIFY_MASK) and their meaning:
+ * for an open file. These are the possible values of
+ * (f->f_mode & FMODE_FSNOTIFY_MASK) and their meaning:
*
* FMODE_NONOTIFY - suppress all (incl. non-permission) events.
* FMODE_NONOTIFY_PERM - suppress permission (incl. pre-content) events.
- * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only pre-content events.
+ * FMODE_NONOTIFY | FMODE_NONOTIFY_PERM - suppress only FAN_ACCESS_PERM.
*/
#define FMODE_FSNOTIFY_MASK \
(FMODE_NONOTIFY | FMODE_NONOTIFY_PERM)
@@ -213,13 +213,13 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_FSNOTIFY_NONE(mode) \
((mode & FMODE_FSNOTIFY_MASK) == FMODE_NONOTIFY)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-#define FMODE_FSNOTIFY_PERM(mode) \
+#define FMODE_FSNOTIFY_HSM(mode) \
((mode & FMODE_FSNOTIFY_MASK) == 0 || \
(mode & FMODE_FSNOTIFY_MASK) == (FMODE_NONOTIFY | FMODE_NONOTIFY_PERM))
-#define FMODE_FSNOTIFY_HSM(mode) \
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) \
((mode & FMODE_FSNOTIFY_MASK) == 0)
#else
-#define FMODE_FSNOTIFY_PERM(mode) 0
+#define FMODE_FSNOTIFY_ACCESS_PERM(mode) 0
#define FMODE_FSNOTIFY_HSM(mode) 0
#endif
@@ -526,7 +526,7 @@ struct address_space {
/*
* On most architectures that alignment is already the case; but
* must be enforced here for CRIS, to let the least significant bit
- * of struct page's "mapping" pointer be used for PAGE_MAPPING_ANON.
+ * of struct folio's "mapping" pointer be used for FOLIO_MAPPING_ANON.
*/
/* XArray tags, for tagging dirty and writeback pages in the pagecache. */
@@ -1043,6 +1043,7 @@ struct fown_struct {
* and so were/are genuinely "ahead". Start next readahead when
* the first of these pages is accessed.
* @ra_pages: Maximum size of a readahead request, copied from the bdi.
+ * @order: Preferred folio order used for the most recent readahead.
* @mmap_miss: How many mmap accesses missed in the page cache.
* @prev_pos: The last byte in the most recent read request.
*
@@ -1054,7 +1055,8 @@ struct file_ra_state {
unsigned int size;
unsigned int async_size;
unsigned int ra_pages;
- unsigned int mmap_miss;
+ unsigned short order;
+ unsigned short mmap_miss;
loff_t prev_pos;
};
@@ -3756,9 +3758,14 @@ void setattr_copy(struct mnt_idmap *, struct inode *inode,
extern int file_update_time(struct file *file);
+static inline bool file_is_dax(const struct file *file)
+{
+ return file && IS_DAX(file->f_mapping->host);
+}
+
static inline bool vma_is_dax(const struct vm_area_struct *vma)
{
- return vma->vm_file && IS_DAX(vma->vm_file->f_mapping->host);
+ return file_is_dax(vma->vm_file);
}
static inline bool vma_is_fsdax(struct vm_area_struct *vma)
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 454d8e466958..28a9cb13fbfa 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -129,7 +129,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-void file_set_fsnotify_mode_from_watchers(struct file *file);
+int fsnotify_open_perm_and_set_mode(struct file *file);
/*
* fsnotify_file_area_perm - permission hook before access to file range
@@ -147,9 +147,6 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
if (!(perm_mask & (MAY_READ | MAY_WRITE | MAY_ACCESS)))
return 0;
- if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode)))
- return 0;
-
/*
* read()/write() and other types of access generate pre-content events.
*/
@@ -160,7 +157,8 @@ static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
return ret;
}
- if (!(perm_mask & MAY_READ))
+ if (!(perm_mask & MAY_READ) ||
+ likely(!FMODE_FSNOTIFY_ACCESS_PERM(file->f_mode)))
return 0;
/*
@@ -208,28 +206,10 @@ static inline int fsnotify_file_perm(struct file *file, int perm_mask)
return fsnotify_file_area_perm(file, perm_mask, NULL, 0);
}
-/*
- * fsnotify_open_perm - permission hook before file open
- */
-static inline int fsnotify_open_perm(struct file *file)
-{
- int ret;
-
- if (likely(!FMODE_FSNOTIFY_PERM(file->f_mode)))
- return 0;
-
- if (file->f_flags & __FMODE_EXEC) {
- ret = fsnotify_path(&file->f_path, FS_OPEN_EXEC_PERM);
- if (ret)
- return ret;
- }
-
- return fsnotify_path(&file->f_path, FS_OPEN_PERM);
-}
-
#else
-static inline void file_set_fsnotify_mode_from_watchers(struct file *file)
+static inline int fsnotify_open_perm_and_set_mode(struct file *file)
{
+ return 0;
}
static inline int fsnotify_file_area_perm(struct file *file, int perm_mask,
@@ -253,11 +233,6 @@ static inline int fsnotify_file_perm(struct file *file, int perm_mask)
{
return 0;
}
-
-static inline int fsnotify_open_perm(struct file *file)
-{
- return 0;
-}
#endif
/*
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index b672ca15f265..7ded7df6e9b5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -1108,7 +1108,7 @@ static __always_inline unsigned long get_lock_parent_ip(void)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_init(void);
#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index be160e8d8bcb..5ebf26fcdcfa 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -423,9 +423,14 @@ static inline bool gfp_compaction_allowed(gfp_t gfp_mask)
extern gfp_t vma_thp_gfp_mask(struct vm_area_struct *vma);
#ifdef CONFIG_CONTIG_ALLOC
+
+typedef unsigned int __bitwise acr_flags_t;
+#define ACR_FLAGS_NONE ((__force acr_flags_t)0) // ordinary allocation request
+#define ACR_FLAGS_CMA ((__force acr_flags_t)BIT(0)) // allocate for CMA
+
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range_noprof(unsigned long start, unsigned long end,
- unsigned migratetype, gfp_t gfp_mask);
+ acr_flags_t alloc_flags, gfp_t gfp_mask);
#define alloc_contig_range(...) alloc_hooks(alloc_contig_range_noprof(__VA_ARGS__))
extern struct page *alloc_contig_pages_noprof(unsigned long nr_pages, gfp_t gfp_mask,
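A hedged sketch of a caller migrating to the typed flags; the pfn range is hypothetical, error handling is trimmed, and CMA itself would pass ACR_FLAGS_CMA instead:

static int my_grab_contig(unsigned long start_pfn, unsigned long nr_pages)
{
	int ret;

	/* Claim a contiguous pfn range, then release it again. */
	ret = alloc_contig_range(start_pfn, start_pfn + nr_pages,
				 ACR_FLAGS_NONE, GFP_KERNEL);
	if (ret)
		return ret;

	free_contig_range(start_pfn, nr_pages);
	return 0;
}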
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 568a9d8c749b..2cc4f1e4ea96 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -1216,7 +1216,11 @@ static inline void hid_hw_wait(struct hid_device *hdev)
/**
* hid_report_len - calculate the report length
*
- * @report: the report we want to know the length
+ * @report: the report whose length we want to know
+ *
+ * The length counts the report ID byte, but only if the ID is nonzero
+ * and therefore is included in the report. Reports whose ID is zero
+ * never include an ID byte.
*/
static inline u32 hid_report_len(struct hid_report *report)
{
@@ -1239,6 +1243,8 @@ void hid_quirks_exit(__u16 bus);
dev_notice(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_warn(hid, fmt, ...) \
dev_warn(&(hid)->dev, fmt, ##__VA_ARGS__)
+#define hid_warn_ratelimited(hid, fmt, ...) \
+ dev_warn_ratelimited(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_info(hid, fmt, ...) \
dev_info(&(hid)->dev, fmt, ##__VA_ARGS__)
#define hid_dbg(hid, fmt, ...) \
diff --git a/include/linux/highmem-internal.h b/include/linux/highmem-internal.h
index 9a7683d79a4b..36053c3d6d64 100644
--- a/include/linux/highmem-internal.h
+++ b/include/linux/highmem-internal.h
@@ -195,7 +195,7 @@ static inline void *kmap_local_page_try_from_panic(struct page *page)
static inline void *kmap_local_folio(struct folio *folio, size_t offset)
{
- return page_address(&folio->page) + offset;
+ return folio_address(folio) + offset;
}
static inline void *kmap_local_page_prot(struct page *page, pgprot_t prot)
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index e48d7f27b0b9..6234f316468c 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -292,12 +292,6 @@ static inline void zero_user_segment(struct page *page,
zero_user_segments(page, start, end, 0, 0);
}
-static inline void zero_user(struct page *page,
- unsigned start, unsigned size)
-{
- zero_user_segments(page, start, start + size, 0, 0);
-}
-
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE
static inline void copy_user_highpage(struct page *to, struct page *from,
@@ -688,10 +682,4 @@ static inline void folio_release_kmap(struct folio *folio, void *addr)
kunmap_local(addr);
folio_put(folio);
}
-
-static inline void unmap_and_put_page(struct page *page, void *addr)
-{
- folio_release_kmap(page_folio(page), addr);
-}
-
#endif /* _LINUX_HIGHMEM_H */
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 99fcf65d575f..0c4c84b8c3be 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -556,9 +556,9 @@ int hisi_qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
struct hisi_acc_sgl_pool;
struct hisi_acc_hw_sgl *hisi_acc_sg_buf_map_to_hw_sgl(struct device *dev,
struct scatterlist *sgl, struct hisi_acc_sgl_pool *pool,
- u32 index, dma_addr_t *hw_sgl_dma);
+ u32 index, dma_addr_t *hw_sgl_dma, enum dma_data_direction dir);
void hisi_acc_sg_buf_unmap(struct device *dev, struct scatterlist *sgl,
- struct hisi_acc_hw_sgl *hw_sgl);
+ struct hisi_acc_hw_sgl *hw_sgl, enum dma_data_direction dir);
struct hisi_acc_sgl_pool *hisi_acc_create_sgl_pool(struct device *dev,
u32 count, u32 sge_nr);
void hisi_acc_free_sgl_pool(struct device *dev,
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2f190c90192d..7748489fde1b 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -37,8 +37,10 @@ int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd, unsigned long addr, pgprot_t newprot,
unsigned long cp_flags);
-vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn, bool write);
-vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn, bool write);
+vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
+vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
+ bool write);
vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
bool write);
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
@@ -261,7 +263,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}
unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long tva_flags,
unsigned long orders);
@@ -282,7 +284,7 @@ unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
*/
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long tva_flags,
unsigned long orders)
{
@@ -317,7 +319,7 @@ struct thpsize {
(1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))
static inline bool vma_thp_disabled(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
/*
* Explicitly disabled through madvise or prctl, or some
@@ -400,8 +402,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
#define split_huge_pmd(__vma, __pmd, __address) \
do { \
pmd_t *____pmd = (__pmd); \
- if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
- || pmd_devmap(*____pmd)) \
+ if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)) \
__split_huge_pmd(__vma, __pmd, __address, \
false); \
} while (0)
@@ -426,16 +427,14 @@ change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
#define split_huge_pud(__vma, __pud, __address) \
do { \
pud_t *____pud = (__pud); \
- if (pud_trans_huge(*____pud) \
- || pud_devmap(*____pud)) \
+ if (pud_trans_huge(*____pud)) \
__split_huge_pud(__vma, __pud, __address); \
} while (0)
-int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
+int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
int advice);
-int madvise_collapse(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end);
+int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
@@ -450,7 +449,7 @@ static inline int is_swap_pmd(pmd_t pmd)
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
+ if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
return NULL;
@@ -458,7 +457,7 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- if (pud_trans_huge(*pud) || pud_devmap(*pud))
+ if (pud_trans_huge(*pud))
return __pud_trans_huge_lock(pud, vma);
else
return NULL;
@@ -473,9 +472,6 @@ static inline bool folio_test_pmd_mappable(struct folio *folio)
return folio_order(folio) >= HPAGE_PMD_ORDER;
}
-struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
- pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
-
vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);
extern struct folio *huge_zero_folio;
@@ -486,9 +482,14 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
return READ_ONCE(huge_zero_folio) == folio;
}
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
+}
+
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
- return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
+ return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
@@ -524,7 +525,7 @@ static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
}
static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long tva_flags,
unsigned long orders)
{
@@ -593,14 +594,14 @@ static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
do { } while (0)
static inline int hugepage_madvise(struct vm_area_struct *vma,
- unsigned long *vm_flags, int advice)
+ vm_flags_t *vm_flags, int advice)
{
return -EINVAL;
}
static inline int madvise_collapse(struct vm_area_struct *vma,
- struct vm_area_struct **prev,
- unsigned long start, unsigned long end)
+ unsigned long start,
+ unsigned long end, bool *lock_dropped)
{
return -EINVAL;
}
@@ -636,6 +637,11 @@ static inline bool is_huge_zero_folio(const struct folio *folio)
return false;
}
+static inline bool is_huge_zero_pfn(unsigned long pfn)
+{
+ return false;
+}
+
static inline bool is_huge_zero_pmd(pmd_t pmd)
{
return false;
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 42f374e828a2..526d27e88b3b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -149,7 +149,7 @@ int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
uffd_flags_t flags,
struct folio **foliop);
#endif /* CONFIG_USERFAULTFD */
-bool hugetlb_reserve_pages(struct inode *inode, long from, long to,
+long hugetlb_reserve_pages(struct inode *inode, long from, long to,
struct vm_area_struct *vma,
vm_flags_t vm_flags);
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
@@ -359,12 +359,6 @@ static inline void hugetlb_show_meminfo_node(int nid)
{
}
-static inline int prepare_hugepage_range(struct file *file,
- unsigned long addr, unsigned long len)
-{
- return -EINVAL;
-}
-
static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma)
{
}
@@ -396,13 +390,6 @@ static inline int is_hugepage_only_range(struct mm_struct *mm,
return 0;
}
-static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
- unsigned long addr, unsigned long end,
- unsigned long floor, unsigned long ceiling)
-{
- BUG();
-}
-
#ifdef CONFIG_USERFAULTFD
static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
struct vm_area_struct *dst_vma,
@@ -740,6 +727,11 @@ extern unsigned int default_hstate_idx;
#define default_hstate (hstates[default_hstate_idx])
+static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
+{
+ return HUGETLBFS_SB(inode->i_sb)->spool;
+}
+
static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio)
{
return folio->_hugetlb_subpool;
diff --git a/include/linux/intel_dg_nvm_aux.h b/include/linux/intel_dg_nvm_aux.h
new file mode 100644
index 000000000000..625d46a6b96e
--- /dev/null
+++ b/include/linux/intel_dg_nvm_aux.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright(c) 2019-2025, Intel Corporation. All rights reserved.
+ */
+
+#ifndef __INTEL_DG_NVM_AUX_H__
+#define __INTEL_DG_NVM_AUX_H__
+
+#include <linux/auxiliary_bus.h>
+#include <linux/container_of.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+
+#define INTEL_DG_NVM_REGIONS 13
+
+struct intel_dg_nvm_region {
+ const char *name;
+};
+
+struct intel_dg_nvm_dev {
+ struct auxiliary_device aux_dev;
+ bool writable_override;
+ bool non_posted_erase;
+ struct resource bar;
+ struct resource bar2;
+ const struct intel_dg_nvm_region *regions;
+};
+
+#define auxiliary_dev_to_intel_dg_nvm_dev(auxiliary_dev) \
+ container_of(auxiliary_dev, struct intel_dg_nvm_dev, aux_dev)
+
+#endif /* __INTEL_DG_NVM_AUX_H__ */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 7073be1d8841..c30d12e16473 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -14,6 +14,7 @@
#include <linux/err.h>
#include <linux/of.h>
#include <linux/iova_bitmap.h>
+#include <uapi/linux/iommufd.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -558,12 +559,52 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
}
/**
+ * __iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @dst_data: Pointer to a struct iommu_user_data for user space data location
+ * @src_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @src_data. Must match with @dst_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _src)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int
+__iommu_copy_struct_to_user(const struct iommu_user_data *dst_data,
+ void *src_data, unsigned int data_type,
+ size_t data_len, size_t min_len)
+{
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (dst_data->type != data_type)
+ return -EINVAL;
+ if (dst_data->len < min_len || data_len < dst_data->len)
+ return -EINVAL;
+ return copy_struct_to_user(dst_data->uptr, dst_data->len, src_data,
+ data_len, NULL);
+}
+
+/**
+ * iommu_copy_struct_to_user - Report iommu driver specific user space data
+ * @user_data: Pointer to a struct iommu_user_data for user space data location
+ * @ksrc: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @data_type: The data type of the @ksrc. Must match with @user_data->type
+ * @min_last: The last member of the data structure @ksrc points to, in its
+ *            initial version.
+ * Return 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_to_user(user_data, ksrc, data_type, min_last) \
+ __iommu_copy_struct_to_user(user_data, ksrc, data_type, sizeof(*ksrc), \
+ offsetofend(typeof(*ksrc), min_last))
+
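A sketch of driver-side usage, mirroring the existing iommu_copy_struct_from_user() pattern; the SMMUv3 struct/type names are borrowed purely for illustration and the register value is a placeholder:

static int my_report_hw_info(const struct iommu_user_data *dst)
{
	struct iommu_hw_info_arm_smmuv3 info = {};

	info.idr[0] = 0x0;	/* would be filled from hardware registers */

	return iommu_copy_struct_to_user(dst, &info,
					 IOMMU_HW_INFO_TYPE_ARM_SMMUV3, aidr);
}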
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @hw_info: report iommu hardware information. The data buffer returned by this
* op is allocated in the iommu driver and freed by the caller after
- * use. The information type is one of enum iommu_hw_info_type defined
- * in include/uapi/linux/iommufd.h.
+ *            use. @type is an in/out parameter: the requested type on input,
+ *            the supported type on output; drivers must reject an unsupported @type
* @domain_alloc: Do not use in new drivers
* @domain_alloc_identity: allocate an IDENTITY domain. Drivers should prefer to
* use identity_domain instead. This should only be used
@@ -596,14 +637,16 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
* - IOMMU_DOMAIN_DMA: must use a dma domain
* - 0: use the default setting
* @default_domain_ops: the default ops for domains
- * @viommu_alloc: Allocate an iommufd_viommu on a physical IOMMU instance behind
- * the @dev, as the set of virtualization resources shared/passed
- * to user space IOMMU instance. And associate it with a nesting
- * @parent_domain. The @viommu_type must be defined in the header
- * include/uapi/linux/iommufd.h
- * It is required to call iommufd_viommu_alloc() helper for
- * a bundled allocation of the core and the driver structures,
- * using the given @ictx pointer.
+ * @get_viommu_size: Get the size of the driver-level vIOMMU structure for a
+ *                   given @dev corresponding to @viommu_type. The driver should
+ *                   return 0 if the vIOMMU type isn't supported. Drivers are
+ *                   required to use the VIOMMU_STRUCT_SIZE macro to relate the
+ *                   driver-level vIOMMU structure to the core one
+ * @viommu_init: Init the driver-level struct of an iommufd_viommu on a physical
+ *               IOMMU instance @viommu->iommu_dev, as the set of virtualization
+ *               resources shared/passed to a user space IOMMU instance.
+ *               Associate it with a nesting @parent_domain. The driver must set
+ *               @viommu->ops to point at its own viommu_ops
* @owner: Driver module providing these ops
* @identity_domain: An always available, always attachable identity
* translation.
@@ -619,7 +662,8 @@ iommu_copy_struct_from_full_user_array(void *kdst, size_t kdst_entry_size,
*/
struct iommu_ops {
bool (*capable)(struct device *dev, enum iommu_cap);
- void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
+ void *(*hw_info)(struct device *dev, u32 *length,
+ enum iommu_hw_info_type *type);
/* Domain allocation and freeing by the iommu driver */
#if IS_ENABLED(CONFIG_FSL_PAMU)
@@ -653,9 +697,11 @@ struct iommu_ops {
int (*def_domain_type)(struct device *dev);
- struct iommufd_viommu *(*viommu_alloc)(
- struct device *dev, struct iommu_domain *parent_domain,
- struct iommufd_ctx *ictx, unsigned int viommu_type);
+ size_t (*get_viommu_size)(struct device *dev,
+ enum iommu_viommu_type viommu_type);
+ int (*viommu_init)(struct iommufd_viommu *viommu,
+ struct iommu_domain *parent_domain,
+ const struct iommu_user_data *user_data);
const struct iommu_domain_ops *default_domain_ops;
struct module *owner;
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 34b6e6ca4bfa..6e7efe83bc5d 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -37,6 +37,7 @@ enum iommufd_object_type {
IOMMUFD_OBJ_VIOMMU,
IOMMUFD_OBJ_VDEVICE,
IOMMUFD_OBJ_VEVENTQ,
+ IOMMUFD_OBJ_HW_QUEUE,
#ifdef CONFIG_IOMMUFD_TEST
IOMMUFD_OBJ_SELFTEST,
#endif
@@ -45,7 +46,13 @@ enum iommufd_object_type {
/* Base struct for all objects with a userspace ID handle. */
struct iommufd_object {
- refcount_t shortterm_users;
+ /*
+ * Destroy will sleep and wait for wait_cnt to go to zero. This allows
+ * concurrent users of the ID to reliably avoid causing a spurious
+	 * destroy failure. Holders incrementing this count should either be
+	 * short-lived or be revoked and blocked during pre_destroy().
+ */
+ refcount_t wait_cnt;
refcount_t users;
enum iommufd_object_type type;
unsigned int id;
@@ -101,7 +108,36 @@ struct iommufd_viommu {
struct list_head veventqs;
struct rw_semaphore veventqs_rwsem;
- unsigned int type;
+ enum iommu_viommu_type type;
+};
+
+struct iommufd_vdevice {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_device *idev;
+
+ /*
+ * Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID of
+ * AMD IOMMU, and vRID of Intel VT-d
+ */
+ u64 virt_id;
+
+ /* Clean up all driver-specific parts of an iommufd_vdevice */
+ void (*destroy)(struct iommufd_vdevice *vdev);
+};
+
+struct iommufd_hw_queue {
+ struct iommufd_object obj;
+ struct iommufd_viommu *viommu;
+ struct iommufd_access *access;
+
+ u64 base_addr; /* in guest physical address space */
+ size_t length;
+
+ enum iommu_hw_queue_type type;
+
+ /* Clean up all driver-specific parts of an iommufd_hw_queue */
+ void (*destroy)(struct iommufd_hw_queue *hw_queue);
};
/**
@@ -120,6 +156,30 @@ struct iommufd_viommu {
* array->entry_num to report the number of handled requests.
* The data structure of the array entry must be defined in
* include/uapi/linux/iommufd.h
+ * @vdevice_size: Size of the driver-defined vDEVICE structure per this vIOMMU
+ * @vdevice_init: Initialize the driver-level structure of a vDEVICE object, or
+ * related HW procedure. @vdev is already initialized by iommufd
+ *                core: vdev->dev and vdev->viommu pointers; vdev->virt_id carries a
+ * per-vIOMMU virtual ID (refer to struct iommu_vdevice_alloc in
+ * include/uapi/linux/iommufd.h)
+ *                If the driver has a deinit function to revert what the
+ *                vdevice_init op does, it should assign it to @vdev->destroy
+ * @get_hw_queue_size: Get the size of a driver-defined HW queue structure for a
+ *                     given @viommu corresponding to @queue_type. The driver
+ *                     should return 0 if the HW queue type isn't supported.
+ *                     Drivers are required to use the HW_QUEUE_STRUCT_SIZE
+ *                     macro to relate the driver-level HW queue structure
+ *                     to the core one
+ * @hw_queue_init_phys: Initialize the driver-level structure of a HW queue
+ *                      whose core-level structure has already been initialized
+ *                      with all the info about the guest queue memory.
+ * Driver providing this op indicates that HW accesses the
+ * guest queue memory via physical addresses.
+ * @index carries the logical HW QUEUE ID per vIOMMU in a
+ * guest VM, for a multi-queue model. @base_addr_pa carries
+ * the physical location of the guest queue
+ *                      If the driver has a deinit function to revert what this
+ *                      op does, it should assign it to the @hw_queue->destroy pointer
*/
struct iommufd_viommu_ops {
void (*destroy)(struct iommufd_viommu *viommu);
@@ -128,6 +188,13 @@ struct iommufd_viommu_ops {
const struct iommu_user_data *user_data);
int (*cache_invalidate)(struct iommufd_viommu *viommu,
struct iommu_user_data_array *array);
+ const size_t vdevice_size;
+ int (*vdevice_init)(struct iommufd_vdevice *vdev);
+ size_t (*get_hw_queue_size)(struct iommufd_viommu *viommu,
+ enum iommu_hw_queue_type queue_type);
+ /* AMD's HW will add hw_queue_init simply using @hw_queue->base_addr */
+ int (*hw_queue_init_phys)(struct iommufd_hw_queue *hw_queue, u32 index,
+ phys_addr_t base_addr_pa);
};
#if IS_ENABLED(CONFIG_IOMMUFD)
@@ -171,8 +238,9 @@ static inline void iommufd_access_unpin_pages(struct iommufd_access *access,
{
}
-static inline int iommufd_access_rw(struct iommufd_access *access, unsigned long iova,
- void *data, size_t len, unsigned int flags)
+static inline int iommufd_access_rw(struct iommufd_access *access,
+ unsigned long iova, void *data, size_t len,
+ unsigned int flags)
{
return -EOPNOTSUPP;
}
@@ -189,9 +257,16 @@ static inline int iommufd_vfio_compat_set_no_iommu(struct iommufd_ctx *ictx)
#endif /* CONFIG_IOMMUFD */
#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER_CORE)
-struct iommufd_object *_iommufd_object_alloc(struct iommufd_ctx *ictx,
- size_t size,
- enum iommufd_object_type type);
+int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+void _iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended);
+int _iommufd_alloc_mmap(struct iommufd_ctx *ictx, struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset);
+void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner, unsigned long offset);
+struct device *iommufd_vdevice_to_device(struct iommufd_vdevice *vdev);
struct device *iommufd_viommu_find_dev(struct iommufd_viommu *viommu,
unsigned long vdev_id);
int iommufd_viommu_get_vdev_id(struct iommufd_viommu *viommu,
@@ -200,11 +275,36 @@ int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
enum iommu_veventq_type type, void *event_data,
size_t data_len);
#else /* !CONFIG_IOMMUFD_DRIVER_CORE */
-static inline struct iommufd_object *
-_iommufd_object_alloc(struct iommufd_ctx *ictx, size_t size,
- enum iommufd_object_type type)
+static inline int _iommufd_object_depend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+_iommufd_object_undepend(struct iommufd_object *obj_dependent,
+ struct iommufd_object *obj_depended)
+{
+}
+
+static inline int _iommufd_alloc_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ phys_addr_t mmio_addr, size_t length,
+ unsigned long *offset)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void _iommufd_destroy_mmap(struct iommufd_ctx *ictx,
+ struct iommufd_object *owner,
+ unsigned long offset)
{
- return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline struct device *
+iommufd_vdevice_to_device(struct iommufd_vdevice *vdev)
+{
+ return NULL;
}
static inline struct device *
@@ -228,21 +328,73 @@ static inline int iommufd_viommu_report_event(struct iommufd_viommu *viommu,
}
#endif /* CONFIG_IOMMUFD_DRIVER_CORE */
+#define VIOMMU_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_viommu, \
+ ((drv_struct *)NULL)->member)))
+
+#define VDEVICE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_vdevice, \
+ ((drv_struct *)NULL)->member)))
+
+#define HW_QUEUE_STRUCT_SIZE(drv_struct, member) \
+ (sizeof(drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof(drv_struct, member)) + \
+ BUILD_BUG_ON_ZERO(!__same_type(struct iommufd_hw_queue, \
+ ((drv_struct *)NULL)->member)))
+
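A sketch of how a driver is expected to use these macros: embed the core struct as the first member and return the sanitized size from the get_viommu_size op. The driver struct, its extra fields, and the type check are hypothetical:

struct my_viommu {
	struct iommufd_viommu core;	/* must be the first member */
	phys_addr_t mmio_phys;
	unsigned long mmap_offset;
	u32 vmid;
};

static size_t my_get_viommu_size(struct device *dev,
				 enum iommu_viommu_type viommu_type)
{
	if (viommu_type != IOMMU_VIOMMU_TYPE_ARM_SMMUV3)
		return 0;
	return VIOMMU_STRUCT_SIZE(struct my_viommu, core);
}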
/*
- * Helpers for IOMMU driver to allocate driver structures that will be freed by
- * the iommufd core. The free op will be called prior to freeing the memory.
+ * Helpers for IOMMU driver to build/destroy a dependency between two sibling
+ * structures created by one of the allocators above
*/
-#define iommufd_viommu_alloc(ictx, drv_struct, member, viommu_ops) \
+#define iommufd_hw_queue_depend(dependent, depended, member) \
({ \
- drv_struct *ret; \
+ int ret = -EINVAL; \
\
- static_assert(__same_type(struct iommufd_viommu, \
- ((drv_struct *)NULL)->member)); \
- static_assert(offsetof(drv_struct, member.obj) == 0); \
- ret = (drv_struct *)_iommufd_object_alloc( \
- ictx, sizeof(drv_struct), IOMMUFD_OBJ_VIOMMU); \
- if (!IS_ERR(ret)) \
- ret->member.ops = viommu_ops; \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ if (!WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu)) \
+ ret = _iommufd_object_depend(&dependent->member.obj, \
+ &depended->member.obj); \
ret; \
})
+
+#define iommufd_hw_queue_undepend(dependent, depended, member) \
+ ({ \
+ static_assert(__same_type(struct iommufd_hw_queue, \
+ dependent->member)); \
+ static_assert(__same_type(typeof(*dependent), *depended)); \
+ WARN_ON_ONCE(dependent->member.viommu != \
+ depended->member.viommu); \
+ _iommufd_object_undepend(&dependent->member.obj, \
+ &depended->member.obj); \
+ })
+
+/*
+ * Helpers for IOMMU driver to alloc/destroy an mmappable area for a structure.
+ *
+ * To support an mmappable MMIO region, kernel driver must first register it to
+ * iommufd core to allocate an @offset, during a driver-structure initialization
+ * (e.g. viommu_init op). Then, it should report to user space this @offset and
+ * the @length of the MMIO region for mmap syscall.
+ */
+static inline int iommufd_viommu_alloc_mmap(struct iommufd_viommu *viommu,
+ phys_addr_t mmio_addr,
+ size_t length,
+ unsigned long *offset)
+{
+ return _iommufd_alloc_mmap(viommu->ictx, &viommu->obj, mmio_addr,
+ length, offset);
+}
+
+static inline void iommufd_viommu_destroy_mmap(struct iommufd_viommu *viommu,
+ unsigned long offset)
+{
+ _iommufd_destroy_mmap(viommu->ictx, &viommu->obj, offset);
+}
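A sketch of the flow the comment describes, in a hypothetical viommu_init op reusing the my_viommu layout from the VIOMMU_STRUCT_SIZE example; mmio_phys and mmap_offset are invented fields:

static int my_viommu_init(struct iommufd_viommu *viommu,
			  struct iommu_domain *parent_domain,
			  const struct iommu_user_data *user_data)
{
	struct my_viommu *mv = container_of(viommu, struct my_viommu, core);
	unsigned long offset;
	int ret;

	ret = iommufd_viommu_alloc_mmap(viommu, mv->mmio_phys, PAGE_SIZE,
					&offset);
	if (ret)
		return ret;

	/* Report @offset and the PAGE_SIZE length to user space for mmap(). */
	mv->mmap_offset = offset;
	return 0;
}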
#endif
diff --git a/include/linux/irq-entry-common.h b/include/linux/irq-entry-common.h
index 8af374331900..4fb2f93d82ed 100644
--- a/include/linux/irq-entry-common.h
+++ b/include/linux/irq-entry-common.h
@@ -49,6 +49,22 @@ static __always_inline void arch_enter_from_user_mode(struct pt_regs *regs) {}
#endif
/**
+ * arch_in_rcu_eqs - Architecture specific check for RCU extended quiescent
+ * states.
+ *
+ * Returns: true if the CPU is potentially in an RCU EQS, false otherwise.
+ *
+ * Architectures only need to define this if threads other than the idle thread
+ * may have an interruptible EQS. This does not need to handle idle threads. It
+ * is safe to over-estimate at the cost of redundant RCU management work.
+ *
+ * Invoked from irqentry_enter()
+ */
+#ifndef arch_in_rcu_eqs
+static __always_inline bool arch_in_rcu_eqs(void) { return false; }
+#endif
+
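An architecture opting in would provide its own definition before this generic fallback is seen, using the usual override-by-define pattern; the guest-mode check below is purely illustrative:

/* In an arch header, e.g. asm/irq-entry-common.h: */
static __always_inline bool arch_in_rcu_eqs(void)
{
	/* e.g. report vCPU threads that run guests in an interruptible EQS */
	return my_arch_this_cpu_in_guest_eqs();
}
#define arch_in_rcu_eqs arch_in_rcu_eqs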
+/**
* enter_from_user_mode - Establish state when coming from user mode
*
* Syscall/interrupt entry disables interrupts, but user mode is traced as
diff --git a/include/linux/irqbypass.h b/include/linux/irqbypass.h
index 9bdb2a781841..ede1fa938152 100644
--- a/include/linux/irqbypass.h
+++ b/include/linux/irqbypass.h
@@ -10,6 +10,7 @@
#include <linux/list.h>
+struct eventfd_ctx;
struct irq_bypass_consumer;
/*
@@ -18,20 +19,20 @@ struct irq_bypass_consumer;
* The IRQ bypass manager is a simple set of lists and callbacks that allows
* IRQ producers (ex. physical interrupt sources) to be matched to IRQ
* consumers (ex. virtualization hardware that allows IRQ bypass or offload)
- * via a shared token (ex. eventfd_ctx). Producers and consumers register
- * independently. When a token match is found, the optional @stop callback
- * will be called for each participant. The pair will then be connected via
- * the @add_* callbacks, and finally the optional @start callback will allow
- * any final coordination. When either participant is unregistered, the
- * process is repeated using the @del_* callbacks in place of the @add_*
- * callbacks. Match tokens must be unique per producer/consumer, 1:N pairings
- * are not supported.
+ * via a shared eventfd_ctx. Producers and consumers register independently.
+ * When a producer and consumer are paired, i.e. an eventfd match is found, the
+ * optional @stop callback will be called for each participant. The pair will
+ * then be connected via the @add_* callbacks, and finally the optional @start
+ * callback will allow any final coordination. When either participant is
+ * unregistered, the process is repeated using the @del_* callbacks in place of
+ * the @add_* callbacks. eventfds must be unique per producer/consumer, 1:N
+ * pairings are not supported.
*/
/**
* struct irq_bypass_producer - IRQ bypass producer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @consumer: The connected consumer (NULL if no connection)
* @irq: Linux IRQ number for the producer device
* @add_consumer: Connect the IRQ producer to an IRQ consumer (optional)
* @del_consumer: Disconnect the IRQ producer from an IRQ consumer (optional)
@@ -43,8 +46,8 @@ struct irq_bypass_consumer;
* for a physical device assigned to a VM.
*/
struct irq_bypass_producer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_consumer *consumer;
int irq;
int (*add_consumer)(struct irq_bypass_producer *,
struct irq_bypass_consumer *);
@@ -56,8 +59,8 @@ struct irq_bypass_producer {
/**
* struct irq_bypass_consumer - IRQ bypass consumer definition
- * @node: IRQ bypass manager private list management
- * @token: opaque token to match between producer and consumer (non-NULL)
+ * @eventfd: eventfd context used to match producers and consumers
+ * @producer: The connected producer (NULL if no connection)
* @add_producer: Connect the IRQ consumer to an IRQ producer
* @del_producer: Disconnect the IRQ consumer from an IRQ producer
* @stop: Perform any quiesce operations necessary prior to add/del (optional)
@@ -69,8 +72,9 @@ struct irq_bypass_producer {
* portions of the interrupt handling to the VM.
*/
struct irq_bypass_consumer {
- struct list_head node;
- void *token;
+ struct eventfd_ctx *eventfd;
+ struct irq_bypass_producer *producer;
+
int (*add_producer)(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void (*del_producer)(struct irq_bypass_consumer *,
@@ -79,9 +83,11 @@ struct irq_bypass_consumer {
void (*start)(struct irq_bypass_consumer *);
};
-int irq_bypass_register_producer(struct irq_bypass_producer *);
-void irq_bypass_unregister_producer(struct irq_bypass_producer *);
-int irq_bypass_register_consumer(struct irq_bypass_consumer *);
-void irq_bypass_unregister_consumer(struct irq_bypass_consumer *);
+int irq_bypass_register_producer(struct irq_bypass_producer *producer,
+ struct eventfd_ctx *eventfd, int irq);
+void irq_bypass_unregister_producer(struct irq_bypass_producer *producer);
+int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer,
+ struct eventfd_ctx *eventfd);
+void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer);
#endif /* IRQBYPASS_H */
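A sketch of the reworked registration flow: the pairing eventfd is now passed at registration time instead of being stashed as an opaque token first. All names below are illustrative:

static int my_attach_bypass(struct irq_bypass_producer *prod,
			    struct irq_bypass_consumer *cons,
			    struct eventfd_ctx *eventfd, int irq)
{
	int ret;

	ret = irq_bypass_register_producer(prod, eventfd, irq);
	if (ret)
		return ret;

	/* Pairs with the producer above via the shared eventfd. */
	ret = irq_bypass_register_consumer(cons, eventfd);
	if (ret)
		irq_bypass_unregister_producer(prod);
	return ret;
}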
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index 7f1f11a5e4e4..0b0887099fd7 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -146,7 +146,7 @@ int its_commit_vpe(struct its_vpe *vpe);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
-int its_unmap_vlpi(int irq);
+void its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
int its_prop_update_vsgi(int irq, u8 priority, bool group);
diff --git a/include/linux/irqchip/arm-gic-v5.h b/include/linux/irqchip/arm-gic-v5.h
new file mode 100644
index 000000000000..68ddcdb1cec5
--- /dev/null
+++ b/include/linux/irqchip/arm-gic-v5.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2025 ARM Limited, All Rights Reserved.
+ */
+#ifndef __LINUX_IRQCHIP_ARM_GIC_V5_H
+#define __LINUX_IRQCHIP_ARM_GIC_V5_H
+
+#include <linux/iopoll.h>
+
+#include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <asm/sysreg.h>
+
+#define GICV5_IPIS_PER_CPU MAX_IPI
+
+/*
+ * INTID handling
+ */
+#define GICV5_HWIRQ_ID GENMASK(23, 0)
+#define GICV5_HWIRQ_TYPE GENMASK(31, 29)
+#define GICV5_HWIRQ_INTID GENMASK_ULL(31, 0)
+
+#define GICV5_HWIRQ_TYPE_PPI UL(0x1)
+#define GICV5_HWIRQ_TYPE_LPI UL(0x2)
+#define GICV5_HWIRQ_TYPE_SPI UL(0x3)
+
+/*
+ * Tables attributes
+ */
+#define GICV5_NO_READ_ALLOC 0b0
+#define GICV5_READ_ALLOC 0b1
+#define GICV5_NO_WRITE_ALLOC 0b0
+#define GICV5_WRITE_ALLOC 0b1
+
+#define GICV5_NON_CACHE 0b00
+#define GICV5_WB_CACHE 0b01
+#define GICV5_WT_CACHE 0b10
+
+#define GICV5_NON_SHARE 0b00
+#define GICV5_OUTER_SHARE 0b10
+#define GICV5_INNER_SHARE 0b11
+
+/*
+ * IRS registers and tables structures
+ */
+#define GICV5_IRS_IDR1 0x0004
+#define GICV5_IRS_IDR2 0x0008
+#define GICV5_IRS_IDR5 0x0014
+#define GICV5_IRS_IDR6 0x0018
+#define GICV5_IRS_IDR7 0x001c
+#define GICV5_IRS_CR0 0x0080
+#define GICV5_IRS_CR1 0x0084
+#define GICV5_IRS_SYNCR 0x00c0
+#define GICV5_IRS_SYNC_STATUSR 0x00c4
+#define GICV5_IRS_SPI_SELR 0x0108
+#define GICV5_IRS_SPI_CFGR 0x0114
+#define GICV5_IRS_SPI_STATUSR 0x0118
+#define GICV5_IRS_PE_SELR 0x0140
+#define GICV5_IRS_PE_STATUSR 0x0144
+#define GICV5_IRS_PE_CR0 0x0148
+#define GICV5_IRS_IST_BASER 0x0180
+#define GICV5_IRS_IST_CFGR 0x0190
+#define GICV5_IRS_IST_STATUSR 0x0194
+#define GICV5_IRS_MAP_L2_ISTR 0x01c0
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS GENMASK(22, 20)
+#define GICV5_IRS_IDR1_IAFFID_BITS GENMASK(19, 16)
+
+#define GICV5_IRS_IDR1_PRIORITY_BITS_1BITS 0b000
+#define GICV5_IRS_IDR1_PRIORITY_BITS_2BITS 0b001
+#define GICV5_IRS_IDR1_PRIORITY_BITS_3BITS 0b010
+#define GICV5_IRS_IDR1_PRIORITY_BITS_4BITS 0b011
+#define GICV5_IRS_IDR1_PRIORITY_BITS_5BITS 0b100
+
+#define GICV5_IRS_IDR2_ISTMD_SZ GENMASK(19, 15)
+#define GICV5_IRS_IDR2_ISTMD BIT(14)
+#define GICV5_IRS_IDR2_IST_L2SZ GENMASK(13, 11)
+#define GICV5_IRS_IDR2_IST_LEVELS BIT(10)
+#define GICV5_IRS_IDR2_MIN_LPI_ID_BITS GENMASK(9, 6)
+#define GICV5_IRS_IDR2_LPI BIT(5)
+#define GICV5_IRS_IDR2_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IDR5_SPI_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR6_SPI_IRS_RANGE GENMASK(24, 0)
+#define GICV5_IRS_IDR7_SPI_BASE GENMASK(23, 0)
+
+#define GICV5_IRS_IST_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(11), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(12), (r))
+#define GICV5_IRS_IST_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(13), (r))
+
+#define GICV5_IRS_CR0_IDLE BIT(1)
+#define GICV5_IRS_CR0_IRSEN BIT(0)
+
+#define GICV5_IRS_CR1_VPED_WA BIT(15)
+#define GICV5_IRS_CR1_VPED_RA BIT(14)
+#define GICV5_IRS_CR1_VMD_WA BIT(13)
+#define GICV5_IRS_CR1_VMD_RA BIT(12)
+#define GICV5_IRS_CR1_VPET_WA BIT(11)
+#define GICV5_IRS_CR1_VPET_RA BIT(10)
+#define GICV5_IRS_CR1_VMT_WA BIT(9)
+#define GICV5_IRS_CR1_VMT_RA BIT(8)
+#define GICV5_IRS_CR1_IST_WA BIT(7)
+#define GICV5_IRS_CR1_IST_RA BIT(6)
+#define GICV5_IRS_CR1_IC GENMASK(5, 4)
+#define GICV5_IRS_CR1_OC GENMASK(3, 2)
+#define GICV5_IRS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_IRS_SYNCR_SYNC BIT(31)
+
+#define GICV5_IRS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_STATUSR_V BIT(1)
+#define GICV5_IRS_SPI_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_SPI_SELR_ID GENMASK(23, 0)
+
+#define GICV5_IRS_SPI_CFGR_TM BIT(0)
+
+#define GICV5_IRS_PE_SELR_IAFFID GENMASK(15, 0)
+
+#define GICV5_IRS_PE_STATUSR_V BIT(1)
+#define GICV5_IRS_PE_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_PE_CR0_DPS BIT(0)
+
+#define GICV5_IRS_IST_STATUSR_IDLE BIT(0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE BIT(16)
+#define GICV5_IRS_IST_CFGR_ISTSZ GENMASK(8, 7)
+#define GICV5_IRS_IST_CFGR_L2SZ GENMASK(6, 5)
+#define GICV5_IRS_IST_CFGR_LPI_ID_BITS GENMASK(4, 0)
+
+#define GICV5_IRS_IST_CFGR_STRUCTURE_LINEAR 0b0
+#define GICV5_IRS_IST_CFGR_STRUCTURE_TWO_LEVEL 0b1
+
+#define GICV5_IRS_IST_CFGR_ISTSZ_4 0b00
+#define GICV5_IRS_IST_CFGR_ISTSZ_8 0b01
+#define GICV5_IRS_IST_CFGR_ISTSZ_16 0b10
+
+#define GICV5_IRS_IST_CFGR_L2SZ_4K 0b00
+#define GICV5_IRS_IST_CFGR_L2SZ_16K 0b01
+#define GICV5_IRS_IST_CFGR_L2SZ_64K 0b10
+
+#define GICV5_IRS_IST_BASER_ADDR_MASK GENMASK_ULL(55, 6)
+#define GICV5_IRS_IST_BASER_VALID BIT_ULL(0)
+
+#define GICV5_IRS_MAP_L2_ISTR_ID GENMASK(23, 0)
+
+#define GICV5_ISTL1E_VALID BIT_ULL(0)
+
+#define GICV5_ISTL1E_L2_ADDR_MASK GENMASK_ULL(55, 12)
+
+/*
+ * ITS registers and tables structures
+ */
+#define GICV5_ITS_IDR1 0x0004
+#define GICV5_ITS_IDR2 0x0008
+#define GICV5_ITS_CR0 0x0080
+#define GICV5_ITS_CR1 0x0084
+#define GICV5_ITS_DT_BASER 0x00c0
+#define GICV5_ITS_DT_CFGR 0x00d0
+#define GICV5_ITS_DIDR 0x0100
+#define GICV5_ITS_EIDR 0x0108
+#define GICV5_ITS_INV_EVENTR 0x010c
+#define GICV5_ITS_INV_DEVICER 0x0110
+#define GICV5_ITS_STATUSR 0x0120
+#define GICV5_ITS_SYNCR 0x0140
+#define GICV5_ITS_SYNC_STATUSR 0x0148
+
+#define GICV5_ITS_IDR1_L2SZ GENMASK(10, 8)
+#define GICV5_ITS_IDR1_ITT_LEVELS BIT(7)
+#define GICV5_ITS_IDR1_DT_LEVELS BIT(6)
+#define GICV5_ITS_IDR1_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_4KB(r) FIELD_GET(BIT(8), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_16KB(r) FIELD_GET(BIT(9), (r))
+#define GICV5_ITS_IDR1_L2SZ_SUPPORT_64KB(r) FIELD_GET(BIT(10), (r))
+
+#define GICV5_ITS_IDR2_XDMN_EVENTs GENMASK(6, 5)
+#define GICV5_ITS_IDR2_EVENTID_BITS GENMASK(4, 0)
+
+#define GICV5_ITS_CR0_IDLE BIT(1)
+#define GICV5_ITS_CR0_ITSEN BIT(0)
+
+#define GICV5_ITS_CR1_ITT_RA BIT(7)
+#define GICV5_ITS_CR1_DT_RA BIT(6)
+#define GICV5_ITS_CR1_IC GENMASK(5, 4)
+#define GICV5_ITS_CR1_OC GENMASK(3, 2)
+#define GICV5_ITS_CR1_SH GENMASK(1, 0)
+
+#define GICV5_ITS_DT_CFGR_STRUCTURE BIT(16)
+#define GICV5_ITS_DT_CFGR_L2SZ GENMASK(7, 6)
+#define GICV5_ITS_DT_CFGR_DEVICEID_BITS GENMASK(5, 0)
+
+#define GICV5_ITS_DT_BASER_ADDR_MASK GENMASK_ULL(55, 3)
+
+#define GICV5_ITS_INV_DEVICER_I BIT(31)
+#define GICV5_ITS_INV_DEVICER_EVENTID_BITS GENMASK(5, 1)
+#define GICV5_ITS_INV_DEVICER_L1 BIT(0)
+
+#define GICV5_ITS_DIDR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_EIDR_EVENTID GENMASK(15, 0)
+
+#define GICV5_ITS_INV_EVENTR_I BIT(31)
+#define GICV5_ITS_INV_EVENTR_ITT_L2SZ GENMASK(2, 1)
+#define GICV5_ITS_INV_EVENTR_L1 BIT(0)
+
+#define GICV5_ITS_STATUSR_IDLE BIT(0)
+
+#define GICV5_ITS_SYNCR_SYNC BIT_ULL(63)
+#define GICV5_ITS_SYNCR_SYNCALL BIT_ULL(32)
+#define GICV5_ITS_SYNCR_DEVICEID GENMASK_ULL(31, 0)
+
+#define GICV5_ITS_SYNC_STATUSR_IDLE BIT(0)
+
+#define GICV5_DTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_DTL2E_VALID BIT_ULL(0)
+#define GICV5_DTL2E_ITT_L2SZ GENMASK_ULL(2, 1)
+/* Note that there is no shift for the address by design */
+#define GICV5_DTL2E_ITT_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_DTL2E_ITT_DSWE BIT_ULL(57)
+#define GICV5_DTL2E_ITT_STRUCTURE BIT_ULL(58)
+#define GICV5_DTL2E_EVENT_ID_BITS GENMASK_ULL(63, 59)
+
+#define GICV5_ITTL1E_VALID BIT_ULL(0)
+/* Note that there is no shift for the address by design */
+#define GICV5_ITTL1E_L2_ADDR_MASK GENMASK_ULL(55, 3)
+#define GICV5_ITTL1E_SPAN GENMASK_ULL(63, 60)
+
+#define GICV5_ITTL2E_LPI_ID GENMASK_ULL(23, 0)
+#define GICV5_ITTL2E_DAC GENMASK_ULL(29, 28)
+#define GICV5_ITTL2E_VIRTUAL BIT_ULL(30)
+#define GICV5_ITTL2E_VALID BIT_ULL(31)
+#define GICV5_ITTL2E_VM_ID GENMASK_ULL(47, 32)
+
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_4k 0b00
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_16k 0b01
+#define GICV5_ITS_DT_ITT_CFGR_L2SZ_64k 0b10
+
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_LINEAR 0
+#define GICV5_ITS_DT_ITT_CFGR_STRUCTURE_TWO_LEVEL 1
+
+#define GICV5_ITS_HWIRQ_DEVICE_ID GENMASK_ULL(31, 0)
+#define GICV5_ITS_HWIRQ_EVENT_ID GENMASK_ULL(63, 32)
+
+/*
+ * IWB registers
+ */
+#define GICV5_IWB_IDR0 0x0000
+#define GICV5_IWB_CR0 0x0080
+#define GICV5_IWB_WENABLE_STATUSR 0x00c0
+#define GICV5_IWB_WENABLER 0x2000
+#define GICV5_IWB_WTMR 0x4000
+
+#define GICV5_IWB_IDR0_INT_DOMS GENMASK(14, 11)
+#define GICV5_IWB_IDR0_IW_RANGE GENMASK(10, 0)
+
+#define GICV5_IWB_CR0_IDLE BIT(1)
+#define GICV5_IWB_CR0_IWBEN BIT(0)
+
+#define GICV5_IWB_WENABLE_STATUSR_IDLE BIT(0)
+
+/*
+ * Global Data structures and functions
+ */
+struct gicv5_chip_data {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *ppi_domain;
+ struct irq_domain *spi_domain;
+ struct irq_domain *lpi_domain;
+ struct irq_domain *ipi_domain;
+ u32 global_spi_count;
+ u8 cpuif_pri_bits;
+ u8 cpuif_id_bits;
+ u8 irs_pri_bits;
+ struct {
+ __le64 *l1ist_addr;
+ u32 l2_size;
+ u8 l2_bits;
+ bool l2;
+ } ist;
+};
+
+extern struct gicv5_chip_data gicv5_global_data __read_mostly;
+
+struct gicv5_irs_chip_data {
+ struct list_head entry;
+ struct fwnode_handle *fwnode;
+ void __iomem *irs_base;
+ u32 flags;
+ u32 spi_min;
+ u32 spi_range;
+ raw_spinlock_t spi_config_lock;
+};
+
+static inline int gicv5_wait_for_op_s_atomic(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask,
+ u32 *val)
+{
+ void __iomem *reg = addr + offset;
+ u32 tmp;
+ int ret;
+
+ ret = readl_poll_timeout_atomic(reg, tmp, tmp & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ if (val)
+ *val = tmp;
+
+ return 0;
+}
+
+static inline int gicv5_wait_for_op_s(void __iomem *addr, u32 offset,
+ const char *reg_s, u32 mask)
+{
+ void __iomem *reg = addr + offset;
+ u32 val;
+ int ret;
+
+ ret = readl_poll_timeout(reg, val, val & mask, 1, 10 * USEC_PER_MSEC);
+ if (unlikely(ret == -ETIMEDOUT)) {
+ pr_err_ratelimited("%s timeout...\n", reg_s);
+ return ret;
+ }
+
+ return 0;
+}
+
+#define gicv5_wait_for_op_atomic(base, reg, mask, val) \
+ gicv5_wait_for_op_s_atomic(base, reg, #reg, mask, val)
+
+#define gicv5_wait_for_op(base, reg, mask) \
+ gicv5_wait_for_op_s(base, reg, #reg, mask)
+
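The #reg stringification is what names the register in the ratelimited timeout message; a usage sketch (the wrapper function is hypothetical):

static int my_irs_wait_idle(struct gicv5_irs_chip_data *irs_data)
{
	/* On timeout this logs "GICV5_IRS_CR0 timeout..." and returns -ETIMEDOUT. */
	return gicv5_wait_for_op(irs_data->irs_base, GICV5_IRS_CR0,
				 GICV5_IRS_CR0_IDLE);
}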
+void __init gicv5_init_lpi_domain(void);
+void __init gicv5_free_lpi_domain(void);
+
+int gicv5_irs_of_probe(struct device_node *parent);
+void gicv5_irs_remove(void);
+int gicv5_irs_enable(void);
+void gicv5_irs_its_probe(void);
+int gicv5_irs_register_cpu(int cpuid);
+int gicv5_irs_cpu_to_iaffid(int cpu_id, u16 *iaffid);
+struct gicv5_irs_chip_data *gicv5_irs_lookup_by_spi_id(u32 spi_id);
+int gicv5_spi_irq_set_type(struct irq_data *d, unsigned int type);
+int gicv5_irs_iste_alloc(u32 lpi);
+void gicv5_irs_syncr(void);
+
+struct gicv5_its_devtab_cfg {
+ union {
+ struct {
+ __le64 *devtab;
+ } linear;
+ struct {
+ __le64 *l1devtab;
+ __le64 **l2ptrs;
+ } l2;
+ };
+ u32 cfgr;
+};
+
+struct gicv5_its_itt_cfg {
+ union {
+ struct {
+ __le64 *itt;
+ unsigned int num_ents;
+ } linear;
+ struct {
+ __le64 *l1itt;
+ __le64 **l2ptrs;
+ unsigned int num_l1_ents;
+ u8 l2sz;
+ } l2;
+ };
+ u8 event_id_bits;
+ bool l2itt;
+};
+
+void gicv5_init_lpis(u32 max);
+void gicv5_deinit_lpis(void);
+
+int gicv5_alloc_lpi(void);
+void gicv5_free_lpi(u32 lpi);
+
+void __init gicv5_its_of_probe(struct device_node *parent);
+#endif
diff --git a/include/linux/irqchip/arm-vgic-info.h b/include/linux/irqchip/arm-vgic-info.h
index a75b2c7de69d..ca1713fac6e3 100644
--- a/include/linux/irqchip/arm-vgic-info.h
+++ b/include/linux/irqchip/arm-vgic-info.h
@@ -15,6 +15,8 @@ enum gic_type {
GIC_V2,
/* Full GICv3, optionally with v2 compat */
GIC_V3,
+ /* Full GICv5, optionally with v3 compat */
+ GIC_V5,
};
struct gic_kvm_info {
@@ -34,6 +36,8 @@ struct gic_kvm_info {
bool has_v4_1;
 	/* Deactivation impaired, subpar stuff */
bool no_hw_deactivation;
+ /* v3 compat support (GICv5 hosts, only) */
+ bool has_gcie_v3_compat;
};
#ifdef CONFIG_KVM
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 266b5e5bb8ce..4a86e6b915dd 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -212,6 +212,9 @@ enum {
/* Address and data pair is mutable when irq_set_affinity() */
IRQ_DOMAIN_FLAG_MSI_IMMUTABLE = (1 << 11),
+ /* IRQ domain requires parent fwnode matching */
+ IRQ_DOMAIN_FLAG_FWNODE_PARENT = (1 << 12),
+
/*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 1cce1f6410a9..989315dabb86 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -373,9 +373,9 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
+/* Rebuild everything on CONFIG_DYNAMIC_FTRACE */
+#ifdef CONFIG_DYNAMIC_FTRACE
+# define REBUILD_DUE_TO_DYNAMIC_FTRACE
#endif
/* Permissions on a sysfs file: you didn't miss the 0 prefix did you? */
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index b8d69cfbb58b..ff6120463745 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -12,7 +12,7 @@ extern int start_stop_khugepaged(void);
extern void __khugepaged_enter(struct mm_struct *mm);
extern void __khugepaged_exit(struct mm_struct *mm);
extern void khugepaged_enter_vma(struct vm_area_struct *vma,
- unsigned long vm_flags);
+ vm_flags_t vm_flags);
extern void khugepaged_min_free_kbytes_update(void);
extern bool current_is_khugepaged(void);
extern int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
@@ -37,7 +37,7 @@ static inline void khugepaged_exit(struct mm_struct *mm)
{
}
static inline void khugepaged_enter_vma(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ vm_flags_t vm_flags)
{
}
static inline int collapse_pte_mapped_thp(struct mm_struct *mm,
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index d73095b5cd96..c17b955e7b0b 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -16,9 +16,9 @@
#ifdef CONFIG_KSM
int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags);
-
-void ksm_add_vma(struct vm_area_struct *vma);
+ unsigned long end, int advice, vm_flags_t *vm_flags);
+vm_flags_t ksm_vma_flags(const struct mm_struct *mm, const struct file *file,
+ vm_flags_t vm_flags);
int ksm_enable_merge_any(struct mm_struct *mm);
int ksm_disable_merge_any(struct mm_struct *mm);
int ksm_disable(struct mm_struct *mm);
@@ -97,8 +97,10 @@ bool ksm_process_mergeable(struct mm_struct *mm);
#else /* !CONFIG_KSM */
-static inline void ksm_add_vma(struct vm_area_struct *vma)
+static inline vm_flags_t ksm_vma_flags(const struct mm_struct *mm,
+ const struct file *file, vm_flags_t vm_flags)
{
+ return vm_flags;
}
static inline int ksm_disable(struct mm_struct *mm)
@@ -131,7 +133,7 @@ static inline void collect_procs_ksm(const struct folio *folio,
#ifdef CONFIG_MMU
static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
- unsigned long end, int advice, unsigned long *vm_flags)
+ unsigned long end, int advice, vm_flags_t *vm_flags)
{
return 0;
}
diff --git a/include/linux/kvm_dirty_ring.h b/include/linux/kvm_dirty_ring.h
index da4d9b5f58f1..eb10d87adf7d 100644
--- a/include/linux/kvm_dirty_ring.h
+++ b/include/linux/kvm_dirty_ring.h
@@ -49,9 +49,10 @@ static inline int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *r
}
static inline int kvm_dirty_ring_reset(struct kvm *kvm,
- struct kvm_dirty_ring *ring)
+ struct kvm_dirty_ring *ring,
+ int *nr_entries_reset)
{
- return 0;
+ return -ENOENT;
}
static inline void kvm_dirty_ring_push(struct kvm_vcpu *vcpu,
@@ -77,17 +78,8 @@ bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm);
u32 kvm_dirty_ring_get_rsvd_entries(struct kvm *kvm);
int kvm_dirty_ring_alloc(struct kvm *kvm, struct kvm_dirty_ring *ring,
int index, u32 size);
-
-/*
- * called with kvm->slots_lock held, returns the number of
- * processed pages.
- */
-int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring);
-
-/*
- * returns =0: successfully pushed
- * <0: unable to push, need to wait
- */
+int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring,
+ int *nr_entries_reset);
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset);
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 3bde4fb5c6aa..15656b7fba6c 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -190,6 +190,7 @@ bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
+#define KVM_PIT_IRQ_SOURCE_ID 2
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@@ -1022,16 +1023,12 @@ void kvm_unlock_all_vcpus(struct kvm *kvm);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
-#ifdef __KVM_HAVE_IOAPIC
+#ifdef CONFIG_KVM_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
-void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
-static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
-{
-}
#endif
#ifdef CONFIG_HAVE_KVM_IRQCHIP
@@ -1693,24 +1690,6 @@ static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
return false;
}
#endif
-#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
-void kvm_arch_start_assignment(struct kvm *kvm);
-void kvm_arch_end_assignment(struct kvm *kvm);
-bool kvm_arch_has_assigned_device(struct kvm *kvm);
-#else
-static inline void kvm_arch_start_assignment(struct kvm *kvm)
-{
-}
-
-static inline void kvm_arch_end_assignment(struct kvm *kvm)
-{
-}
-
-static __always_inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
-{
- return false;
-}
-#endif
static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
@@ -1788,8 +1767,6 @@ void kvm_register_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
struct kvm_irq_ack_notifier *kian);
-int kvm_request_irq_source_id(struct kvm *kvm);
-void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);
/*
@@ -2406,6 +2383,8 @@ struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
#if IS_ENABLED(CONFIG_HAVE_KVM_IRQ_BYPASS)
+struct kvm_kernel_irqfd;
+
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
@@ -2413,10 +2392,9 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
-int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
- uint32_t guest_irq, bool set);
-bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *,
- struct kvm_kernel_irq_routing_entry *);
+void kvm_arch_update_irqfd_routing(struct kvm_kernel_irqfd *irqfd,
+ struct kvm_kernel_irq_routing_entry *old,
+ struct kvm_kernel_irq_routing_entry *new);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
diff --git a/include/linux/kvm_irqfd.h b/include/linux/kvm_irqfd.h
index 8ad43692e3bb..ef8c134ded8a 100644
--- a/include/linux/kvm_irqfd.h
+++ b/include/linux/kvm_irqfd.h
@@ -55,10 +55,13 @@ struct kvm_kernel_irqfd {
/* Used for setup/shutdown */
struct eventfd_ctx *eventfd;
struct list_head list;
- poll_table pt;
struct work_struct shutdown;
struct irq_bypass_consumer consumer;
struct irq_bypass_producer *producer;
+
+ struct kvm_vcpu *irq_bypass_vcpu;
+ struct list_head vcpu_list;
+ void *irq_bypass_data;
};
#endif /* __LINUX_KVM_IRQFD_H */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 21ec856c36bc..775a96217518 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -197,7 +197,7 @@ int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
* @fled_cdev: the flash LED to set
* @timeout: the flash timeout to set it to
*
- * Set the flash strobe duration.
+ * Set the flash strobe timeout.
*
* Returns: 0 on success or negative error value on failure
*/
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b3f0aa081064..b16b803cc1ac 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -294,7 +294,6 @@ void led_remove_lookup(struct led_lookup_data *led_lookup);
struct led_classdev *__must_check led_get(struct device *dev, char *con_id);
struct led_classdev *__must_check devm_led_get(struct device *dev, char *con_id);
-extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index e772aae71843..28f086c4a187 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -6,12 +6,12 @@
*/
#ifndef __LIBNVDIMM_H__
#define __LIBNVDIMM_H__
-#include <linux/kernel.h>
+
+#include <linux/io.h>
#include <linux/sizes.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/uuid.h>
-#include <linux/spinlock.h>
-#include <linux/bio.h>
struct badrange_entry {
u64 start;
@@ -80,7 +80,9 @@ typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
struct nvdimm *nvdimm, unsigned int cmd, void *buf,
unsigned int buf_len, int *cmd_rc);
+struct attribute_group;
struct device_node;
+struct module;
struct nvdimm_bus_descriptor {
const struct attribute_group **attr_groups;
unsigned long cmd_mask;
@@ -121,6 +123,8 @@ struct nd_mapping_desc {
int position;
};
+struct bio;
+struct resource;
struct nd_region;
struct nd_region_desc {
struct resource *res;
@@ -147,8 +151,6 @@ static inline void __iomem *devm_nvdimm_ioremap(struct device *dev,
return (void __iomem *) devm_nvdimm_memremap(dev, offset, size, 0);
}
-struct nvdimm_bus;
-
/*
* Note that separate bits for locked + unlocked are defined so that
* 'flags == 0' corresponds to an error / not-supported state.
@@ -238,6 +240,9 @@ struct nvdimm_fw_ops {
int (*arm)(struct nvdimm *nvdimm, enum nvdimm_fwa_trigger arg);
};
+struct kobject;
+struct nvdimm_bus;
+
void badrange_init(struct badrange *badrange);
int badrange_add(struct badrange *badrange, u64 addr, u64 length);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 27b17f64bcee..607b2360c938 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -83,7 +83,7 @@ static inline void init_llist_head(struct llist_head *list)
*/
static inline void init_llist_node(struct llist_node *node)
{
- node->next = node;
+ WRITE_ONCE(node->next, node);
}
/**
@@ -97,7 +97,7 @@ static inline void init_llist_node(struct llist_node *node)
*/
static inline bool llist_on_list(const struct llist_node *node)
{
- return node->next != node;
+ return READ_ONCE(node->next) != node;
}
/**
@@ -220,7 +220,7 @@ static inline bool llist_empty(const struct llist_head *head)
static inline struct llist_node *llist_next(struct llist_node *node)
{
- return node->next;
+ return READ_ONCE(node->next);
}
/**
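
The three accessor fixes above matter because llist is built for lockless use: a node marks itself off-list by pointing next at itself, and producers and consumers may observe that field concurrently, so the marked accesses keep those data races well-defined (and KCSAN-clean). A minimal sketch of the pattern, assuming a hypothetical process() consumer:

struct my_work {
	struct llist_node node;
	int payload;
};

/* Producer: safe from any context, even NMI. */
static void my_work_queue(struct llist_head *head, struct my_work *w)
{
	llist_add(&w->node, head);
}

/* Consumer: detach the whole list atomically, then walk it lock-free. */
static void my_work_drain(struct llist_head *head)
{
	struct llist_node *first = llist_del_all(head);
	struct my_work *w, *tmp;

	llist_for_each_entry_safe(w, tmp, first, node) {
		init_llist_node(&w->node);	/* mark it off-list again */
		process(w);			/* hypothetical */
	}
}
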
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 9ef129038224..bafe143b1f78 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -75,8 +75,8 @@
* searching for gaps or any other code that needs to find the end of the data.
*/
struct maple_metadata {
- unsigned char end;
- unsigned char gap;
+ unsigned char end; /* end of data */
+ unsigned char gap; /* offset of largest gap */
};
/*
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 87b6688f124a..785173aa0739 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -251,8 +251,10 @@ struct mem_cgroup {
* that this indicator should NOT be used in legacy cgroup mode
* where socket memory is accounted/charged separately.
*/
- unsigned long socket_pressure;
-
+ u64 socket_pressure;
+#if BITS_PER_LONG < 64
+ seqlock_t socket_pressure_seqlock;
+#endif
int kmemcg_id;
/*
* memcg->objcg is wiped out as a part of the objcg reparenting
@@ -1602,6 +1604,42 @@ extern struct static_key_false memcg_sockets_enabled_key;
#define mem_cgroup_sockets_enabled static_branch_unlikely(&memcg_sockets_enabled_key)
void mem_cgroup_sk_alloc(struct sock *sk);
void mem_cgroup_sk_free(struct sock *sk);
+
+#if BITS_PER_LONG < 64
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ u64 val = get_jiffies_64() + HZ;
+ unsigned long flags;
+
+ write_seqlock_irqsave(&memcg->socket_pressure_seqlock, flags);
+ memcg->socket_pressure = val;
+ write_sequnlock_irqrestore(&memcg->socket_pressure_seqlock, flags);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ unsigned int seq;
+ u64 val;
+
+ do {
+ seq = read_seqbegin(&memcg->socket_pressure_seqlock);
+ val = memcg->socket_pressure;
+ } while (read_seqretry(&memcg->socket_pressure_seqlock, seq));
+
+ return val;
+}
+#else
+static inline void mem_cgroup_set_socket_pressure(struct mem_cgroup *memcg)
+{
+ WRITE_ONCE(memcg->socket_pressure, jiffies + HZ);
+}
+
+static inline u64 mem_cgroup_get_socket_pressure(struct mem_cgroup *memcg)
+{
+ return READ_ONCE(memcg->socket_pressure);
+}
+#endif
+
static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
{
#ifdef CONFIG_MEMCG_V1
@@ -1609,7 +1647,7 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg)
return !!memcg->tcpmem_pressure;
#endif /* CONFIG_MEMCG_V1 */
do {
- if (time_before(jiffies, READ_ONCE(memcg->socket_pressure)))
+ if (time_before64(get_jiffies_64(), mem_cgroup_get_socket_pressure(memcg)))
return true;
} while ((memcg = parent_mem_cgroup(memcg)));
return false;
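
The seqlock split is needed because socket_pressure is now a 64-bit jiffies value: on 32-bit kernels a plain u64 load can tear, so readers must retry under the seqlock, while 64-bit kernels keep the cheap READ_ONCE()/WRITE_ONCE() pair. Using get_jiffies_64() with time_before64() also makes the one-second pressure window immune to 32-bit jiffies wraparound.
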
diff --git a/include/linux/memfd.h b/include/linux/memfd.h
index 246daadbfde8..6f606d9573c3 100644
--- a/include/linux/memfd.h
+++ b/include/linux/memfd.h
@@ -14,7 +14,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx);
* We also update VMA flags if appropriate by manipulating the VMA flags pointed
* to by vm_flags_ptr.
*/
-int memfd_check_seals_mmap(struct file *file, unsigned long *vm_flags_ptr);
+int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr);
#else
static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a)
{
@@ -25,7 +25,7 @@ static inline struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx)
return ERR_PTR(-EINVAL);
}
static inline int memfd_check_seals_mmap(struct file *file,
- unsigned long *vm_flags_ptr)
+ vm_flags_t *vm_flags_ptr)
{
return 0;
}
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 0dc0cf2863e2..7a805796fcfd 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -18,7 +18,7 @@
* adistance value (slightly faster) than default DRAM adistance to be part of
* the same memory tier.
*/
-#define MEMTIER_ADISTANCE_DRAM ((4 * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
+#define MEMTIER_ADISTANCE_DRAM ((4L * MEMTIER_CHUNK_SIZE) + (MEMTIER_CHUNK_SIZE >> 1))
struct memory_tier;
struct memory_dev_type {
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 5ec4e6d209b9..40eb70ccb09d 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -109,8 +109,6 @@ struct memory_notify {
unsigned long altmap_nr_pages;
unsigned long start_pfn;
unsigned long nr_pages;
- int status_change_nid_normal;
- int status_change_nid;
};
struct notifier_block;
@@ -179,12 +177,30 @@ struct memory_group *memory_group_find_by_id(int mgid);
typedef int (*walk_memory_groups_func_t)(struct memory_group *, void *);
int walk_dynamic_memory_groups(int nid, walk_memory_groups_func_t func,
struct memory_group *excluded, void *arg);
+struct memory_block *find_memory_block_by_id(unsigned long block_id);
#define hotplug_memory_notifier(fn, pri) ({ \
static __meminitdata struct notifier_block fn##_mem_nb =\
{ .notifier_call = fn, .priority = pri };\
register_memory_notifier(&fn##_mem_nb); \
})
+extern int sections_per_block;
+
+static inline unsigned long memory_block_id(unsigned long section_nr)
+{
+ return section_nr / sections_per_block;
+}
+
+static inline unsigned long pfn_to_block_id(unsigned long pfn)
+{
+ return memory_block_id(pfn_to_section_nr(pfn));
+}
+
+static inline unsigned long phys_to_block_id(unsigned long phys)
+{
+ return pfn_to_block_id(PFN_DOWN(phys));
+}
+
#ifdef CONFIG_NUMA
void memory_block_add_nid(struct memory_block *mem, int nid,
enum meminit_context context);
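
The exported sections_per_block plus the three new helpers make the section-to-block arithmetic usable outside drivers/base/memory.c. A hedged lookup sketch; whether the caller must drop a device reference afterwards depends on find_memory_block_by_id()'s contract, which this hunk does not show:

/* Map a physical address to its hotplug memory block, if one exists. */
static struct memory_block *lookup_block(phys_addr_t phys)
{
	return find_memory_block_by_id(phys_to_block_id(phys));
}
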
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index eaac5ae8c05c..23f038a16231 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -314,7 +314,8 @@ extern int add_memory_driver_managed(int nid, u64 start, u64 size,
mhp_t mhp_flags);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages,
- struct vmem_altmap *altmap, int migratetype);
+ struct vmem_altmap *altmap, int migratetype,
+ bool isolate_pageblock);
extern void remove_pfn_range_from_zone(struct zone *zone,
unsigned long start_pfn,
unsigned long nr_pages);
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index 556375b91316..9acd703dd5ca 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -10,11 +10,13 @@
#ifndef __LINUX_MFD_DAVINCI_VOICECODEC_H_
#define __LINUX_MFD_DAVINCI_VOICECODEC_H_
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
+#include <linux/bits.h>
#include <linux/mfd/core.h>
-#include <linux/platform_data/edma.h>
+#include <linux/types.h>
+struct clk;
+struct device;
+struct platform_device;
struct regmap;
/*
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
index 32e3470708ed..7e84738cbb20 100644
--- a/include/linux/mfd/madera/pdata.h
+++ b/include/linux/mfd/madera/pdata.h
@@ -8,10 +8,11 @@
#ifndef MADERA_PDATA_H
#define MADERA_PDATA_H
-#include <linux/kernel.h>
#include <linux/regulator/arizona-ldo1.h>
#include <linux/regulator/arizona-micsupp.h>
#include <linux/regulator/machine.h>
+#include <linux/types.h>
+
#include <sound/madera-pdata.h>
#define MADERA_MAX_MICBIAS 4
diff --git a/include/linux/mfd/pcf50633/core.h b/include/linux/mfd/pcf50633/core.h
deleted file mode 100644
index 42d2b0e4884e..000000000000
--- a/include/linux/mfd/pcf50633/core.h
+++ /dev/null
@@ -1,229 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * core.h -- Core driver for NXP PCF50633
- *
- * (C) 2006-2008 by Openmoko, Inc.
- * All rights reserved.
- */
-
-#ifndef __LINUX_MFD_PCF50633_CORE_H
-#define __LINUX_MFD_PCF50633_CORE_H
-
-#include <linux/i2c.h>
-#include <linux/workqueue.h>
-#include <linux/regulator/driver.h>
-#include <linux/regulator/machine.h>
-#include <linux/pm.h>
-#include <linux/power_supply.h>
-
-struct pcf50633;
-struct regmap;
-
-#define PCF50633_NUM_REGULATORS 11
-
-struct pcf50633_platform_data {
- struct regulator_init_data reg_init_data[PCF50633_NUM_REGULATORS];
-
- char **batteries;
- int num_batteries;
-
- /*
- * Should be set accordingly to the reference resistor used, see
- * I_{ch(ref)} charger reference current in the pcf50633 User
- * Manual.
- */
- int charger_reference_current_ma;
-
- /* Callbacks */
- void (*probe_done)(struct pcf50633 *);
- void (*mbc_event_callback)(struct pcf50633 *, int);
- void (*regulator_registered)(struct pcf50633 *, int);
- void (*force_shutdown)(struct pcf50633 *);
-
- u8 resumers[5];
-};
-
-struct pcf50633_irq {
- void (*handler) (int, void *);
- void *data;
-};
-
-int pcf50633_register_irq(struct pcf50633 *pcf, int irq,
- void (*handler) (int, void *), void *data);
-int pcf50633_free_irq(struct pcf50633 *pcf, int irq);
-
-int pcf50633_irq_mask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_unmask(struct pcf50633 *pcf, int irq);
-int pcf50633_irq_mask_get(struct pcf50633 *pcf, int irq);
-
-int pcf50633_read_block(struct pcf50633 *, u8 reg,
- int nr_regs, u8 *data);
-int pcf50633_write_block(struct pcf50633 *pcf, u8 reg,
- int nr_regs, u8 *data);
-u8 pcf50633_reg_read(struct pcf50633 *, u8 reg);
-int pcf50633_reg_write(struct pcf50633 *pcf, u8 reg, u8 val);
-
-int pcf50633_reg_set_bit_mask(struct pcf50633 *pcf, u8 reg, u8 mask, u8 val);
-int pcf50633_reg_clear_bits(struct pcf50633 *pcf, u8 reg, u8 bits);
-
-/* Interrupt registers */
-
-#define PCF50633_REG_INT1 0x02
-#define PCF50633_REG_INT2 0x03
-#define PCF50633_REG_INT3 0x04
-#define PCF50633_REG_INT4 0x05
-#define PCF50633_REG_INT5 0x06
-
-#define PCF50633_REG_INT1M 0x07
-#define PCF50633_REG_INT2M 0x08
-#define PCF50633_REG_INT3M 0x09
-#define PCF50633_REG_INT4M 0x0a
-#define PCF50633_REG_INT5M 0x0b
-
-enum {
- /* Chip IRQs */
- PCF50633_IRQ_ADPINS,
- PCF50633_IRQ_ADPREM,
- PCF50633_IRQ_USBINS,
- PCF50633_IRQ_USBREM,
- PCF50633_IRQ_RESERVED1,
- PCF50633_IRQ_RESERVED2,
- PCF50633_IRQ_ALARM,
- PCF50633_IRQ_SECOND,
- PCF50633_IRQ_ONKEYR,
- PCF50633_IRQ_ONKEYF,
- PCF50633_IRQ_EXTON1R,
- PCF50633_IRQ_EXTON1F,
- PCF50633_IRQ_EXTON2R,
- PCF50633_IRQ_EXTON2F,
- PCF50633_IRQ_EXTON3R,
- PCF50633_IRQ_EXTON3F,
- PCF50633_IRQ_BATFULL,
- PCF50633_IRQ_CHGHALT,
- PCF50633_IRQ_THLIMON,
- PCF50633_IRQ_THLIMOFF,
- PCF50633_IRQ_USBLIMON,
- PCF50633_IRQ_USBLIMOFF,
- PCF50633_IRQ_ADCRDY,
- PCF50633_IRQ_ONKEY1S,
- PCF50633_IRQ_LOWSYS,
- PCF50633_IRQ_LOWBAT,
- PCF50633_IRQ_HIGHTMP,
- PCF50633_IRQ_AUTOPWRFAIL,
- PCF50633_IRQ_DWN1PWRFAIL,
- PCF50633_IRQ_DWN2PWRFAIL,
- PCF50633_IRQ_LEDPWRFAIL,
- PCF50633_IRQ_LEDOVP,
- PCF50633_IRQ_LDO1PWRFAIL,
- PCF50633_IRQ_LDO2PWRFAIL,
- PCF50633_IRQ_LDO3PWRFAIL,
- PCF50633_IRQ_LDO4PWRFAIL,
- PCF50633_IRQ_LDO5PWRFAIL,
- PCF50633_IRQ_LDO6PWRFAIL,
- PCF50633_IRQ_HCLDOPWRFAIL,
- PCF50633_IRQ_HCLDOOVL,
-
- /* Always last */
- PCF50633_NUM_IRQ,
-};
-
-struct pcf50633 {
- struct device *dev;
- struct regmap *regmap;
-
- struct pcf50633_platform_data *pdata;
- int irq;
- struct pcf50633_irq irq_handler[PCF50633_NUM_IRQ];
- struct work_struct irq_work;
- struct workqueue_struct *work_queue;
- struct mutex lock;
-
- u8 mask_regs[5];
-
- u8 suspend_irq_masks[5];
- u8 resume_reason[5];
- int is_suspended;
-
- int onkey1s_held;
-
- struct platform_device *rtc_pdev;
- struct platform_device *mbc_pdev;
- struct platform_device *adc_pdev;
- struct platform_device *input_pdev;
- struct platform_device *bl_pdev;
- struct platform_device *regulator_pdev[PCF50633_NUM_REGULATORS];
-};
-
-enum pcf50633_reg_int1 {
- PCF50633_INT1_ADPINS = 0x01, /* Adapter inserted */
- PCF50633_INT1_ADPREM = 0x02, /* Adapter removed */
- PCF50633_INT1_USBINS = 0x04, /* USB inserted */
- PCF50633_INT1_USBREM = 0x08, /* USB removed */
- /* reserved */
- PCF50633_INT1_ALARM = 0x40, /* RTC alarm time is reached */
- PCF50633_INT1_SECOND = 0x80, /* RTC periodic second interrupt */
-};
-
-enum pcf50633_reg_int2 {
- PCF50633_INT2_ONKEYR = 0x01, /* ONKEY rising edge */
- PCF50633_INT2_ONKEYF = 0x02, /* ONKEY falling edge */
- PCF50633_INT2_EXTON1R = 0x04, /* EXTON1 rising edge */
- PCF50633_INT2_EXTON1F = 0x08, /* EXTON1 falling edge */
- PCF50633_INT2_EXTON2R = 0x10, /* EXTON2 rising edge */
- PCF50633_INT2_EXTON2F = 0x20, /* EXTON2 falling edge */
- PCF50633_INT2_EXTON3R = 0x40, /* EXTON3 rising edge */
- PCF50633_INT2_EXTON3F = 0x80, /* EXTON3 falling edge */
-};
-
-enum pcf50633_reg_int3 {
- PCF50633_INT3_BATFULL = 0x01, /* Battery full */
- PCF50633_INT3_CHGHALT = 0x02, /* Charger halt */
- PCF50633_INT3_THLIMON = 0x04,
- PCF50633_INT3_THLIMOFF = 0x08,
- PCF50633_INT3_USBLIMON = 0x10,
- PCF50633_INT3_USBLIMOFF = 0x20,
- PCF50633_INT3_ADCRDY = 0x40, /* ADC result ready */
- PCF50633_INT3_ONKEY1S = 0x80, /* ONKEY pressed 1 second */
-};
-
-enum pcf50633_reg_int4 {
- PCF50633_INT4_LOWSYS = 0x01,
- PCF50633_INT4_LOWBAT = 0x02,
- PCF50633_INT4_HIGHTMP = 0x04,
- PCF50633_INT4_AUTOPWRFAIL = 0x08,
- PCF50633_INT4_DWN1PWRFAIL = 0x10,
- PCF50633_INT4_DWN2PWRFAIL = 0x20,
- PCF50633_INT4_LEDPWRFAIL = 0x40,
- PCF50633_INT4_LEDOVP = 0x80,
-};
-
-enum pcf50633_reg_int5 {
- PCF50633_INT5_LDO1PWRFAIL = 0x01,
- PCF50633_INT5_LDO2PWRFAIL = 0x02,
- PCF50633_INT5_LDO3PWRFAIL = 0x04,
- PCF50633_INT5_LDO4PWRFAIL = 0x08,
- PCF50633_INT5_LDO5PWRFAIL = 0x10,
- PCF50633_INT5_LDO6PWRFAIL = 0x20,
- PCF50633_INT5_HCLDOPWRFAIL = 0x40,
- PCF50633_INT5_HCLDOOVL = 0x80,
-};
-
-/* misc. registers */
-#define PCF50633_REG_OOCSHDWN 0x0c
-
-/* LED registers */
-#define PCF50633_REG_LEDOUT 0x28
-#define PCF50633_REG_LEDENA 0x29
-#define PCF50633_REG_LEDCTL 0x2a
-#define PCF50633_REG_LEDDIM 0x2b
-
-static inline struct pcf50633 *dev_to_pcf50633(struct device *dev)
-{
- return dev_get_drvdata(dev);
-}
-
-int pcf50633_irq_init(struct pcf50633 *pcf, int irq);
-void pcf50633_irq_free(struct pcf50633 *pcf);
-extern const struct dev_pm_ops pcf50633_pm;
-
-#endif
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 69cbea78b430..28170ee08898 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -812,6 +812,8 @@ enum rk806_pin_dr_sel {
#define RK806_INT_POL_H BIT(1)
#define RK806_INT_POL_L 0
+/* SYS_CFG3 */
+#define RK806_RST_FUN_MSK GENMASK(7, 6)
#define RK806_SLAVE_RESTART_FUN_MSK BIT(1)
#define RK806_SLAVE_RESTART_FUN_EN BIT(1)
#define RK806_SLAVE_RESTART_FUN_OFF 0
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index e9e24f4c4578..9b9119c742a2 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -11,9 +11,11 @@
#ifndef _LINUX_MFD_SYSCON_ATMEL_SMC_H_
#define _LINUX_MFD_SYSCON_ATMEL_SMC_H_
-#include <linux/kernel.h>
-#include <linux/of.h>
-#include <linux/regmap.h>
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct device_node;
+struct regmap;
#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
#define ATMEL_HSMC_SETUP(layout, cs) \
diff --git a/include/linux/mfd/tps65219.h b/include/linux/mfd/tps65219.h
index 3e8d29189267..55234e771ba7 100644
--- a/include/linux/mfd/tps65219.h
+++ b/include/linux/mfd/tps65219.h
@@ -10,7 +10,6 @@
#define MFD_TPS65219_H
#include <linux/bitops.h>
-#include <linux/notifier.h>
#include <linux/regmap.h>
#include <linux/regulator/driver.h>
@@ -438,17 +437,13 @@ enum tps65219_irqs {
*
* @dev: MFD device
* @regmap: Regmap for accessing the device registers
- * @chip_id: Chip ID
* @irq_data: Regmap irq data used for the irq chip
- * @nb: notifier block for the restart handler
*/
struct tps65219 {
struct device *dev;
struct regmap *regmap;
- unsigned int chip_id;
struct regmap_irq_chip_data *irq_data;
- struct notifier_block nb;
};
#endif /* MFD_TPS65219_H */
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 85dc406173db..b31e07fa4d51 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -205,27 +205,6 @@ int twl_get_hfclk_rate(void);
int twl6030_interrupt_unmask(u8 bit_mask, u8 offset);
int twl6030_interrupt_mask(u8 bit_mask, u8 offset);
-/* Card detect Configuration for MMC1 Controller on OMAP4 */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect_config(void);
-#else
-static inline int twl6030_mmc_card_detect_config(void)
-{
- pr_debug("twl6030_mmc_card_detect_config not supported\n");
- return 0;
-}
-#endif
-
-/* MMC1 Controller on OMAP4 uses Phoenix irq for Card detect */
-#ifdef CONFIG_TWL4030_CORE
-int twl6030_mmc_card_detect(struct device *dev, int slot);
-#else
-static inline int twl6030_mmc_card_detect(struct device *dev, int slot)
-{
- pr_debug("Call back twl6030_mmc_card_detect not supported\n");
- return -EIO;
-}
-#endif
/*----------------------------------------------------------------------*/
/*
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index a3241e4d7548..5f70d3b5d1b1 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -8,11 +8,12 @@
#ifndef __LINUX_MFD_WM8350_CORE_H_
#define __LINUX_MFD_WM8350_CORE_H_
-#include <linux/kernel.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
#include <linux/completion.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/mutex.h>
#include <linux/regmap.h>
+#include <linux/types.h>
#include <linux/mfd/wm8350/audio.h>
#include <linux/mfd/wm8350/gpio.h>
@@ -21,6 +22,9 @@
#include <linux/mfd/wm8350/supply.h>
#include <linux/mfd/wm8350/wdt.h>
+struct device;
+struct platform_device;
+
/*
* Register values.
*/
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index aaa2114498d6..acadd41e0b5c 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -35,8 +35,8 @@ struct migration_target_control;
* @src page. The driver should copy the contents of the
* @src page to the @dst page and set up the fields of @dst page.
* Both pages are locked.
- * If page migration is successful, the driver should call
- * __ClearPageMovable(@src) and return MIGRATEPAGE_SUCCESS.
+ * If page migration is successful, the driver should
+ * return MIGRATEPAGE_SUCCESS.
* If the driver cannot migrate the page at the moment, it can return
* -EAGAIN. The VM interprets this as a temporary migration failure and
* will retry it later. Any other error value is a permanent migration
@@ -69,7 +69,7 @@ int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded);
struct folio *alloc_migration_target(struct folio *src, unsigned long private);
-bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode);
bool isolate_folio_to_list(struct folio *folio, struct list_head *list);
int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -90,7 +90,7 @@ static inline int migrate_pages(struct list_head *l, new_folio_t new,
static inline struct folio *alloc_migration_target(struct folio *src,
unsigned long private)
{ return NULL; }
-static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+static inline bool isolate_movable_ops_page(struct page *page, isolate_mode_t mode)
{ return false; }
static inline bool isolate_folio_to_list(struct folio *folio, struct list_head *list)
{ return false; }
@@ -103,44 +103,6 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
-#ifdef CONFIG_COMPACTION
-bool PageMovable(struct page *page);
-void __SetPageMovable(struct page *page, const struct movable_operations *ops);
-void __ClearPageMovable(struct page *page);
-#else
-static inline bool PageMovable(struct page *page) { return false; }
-static inline void __SetPageMovable(struct page *page,
- const struct movable_operations *ops)
-{
-}
-static inline void __ClearPageMovable(struct page *page)
-{
-}
-#endif
-
-static inline bool folio_test_movable(struct folio *folio)
-{
- return PageMovable(&folio->page);
-}
-
-static inline
-const struct movable_operations *folio_movable_ops(struct folio *folio)
-{
- VM_BUG_ON(!__folio_test_movable(folio));
-
- return (const struct movable_operations *)
- ((unsigned long)folio->mapping - PAGE_MAPPING_MOVABLE);
-}
-
-static inline
-const struct movable_operations *page_movable_ops(struct page *page)
-{
- VM_BUG_ON(!__PageMovable(page));
-
- return (const struct movable_operations *)
- ((unsigned long)page->mapping - PAGE_MAPPING_MOVABLE);
-}
-
#ifdef CONFIG_NUMA_BALANCING
int migrate_misplaced_folio_prepare(struct folio *folio,
struct vm_area_struct *vma, int node);
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index e6ba8f4f4bd1..8c5fbfb85749 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -36,6 +36,7 @@
#include <linux/kernel.h>
#include <linux/completion.h>
#include <linux/pci.h>
+#include <linux/pci-tph.h>
#include <linux/irq.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
@@ -688,6 +689,7 @@ struct mlx5_fw_tracer;
struct mlx5_vxlan;
struct mlx5_geneve;
struct mlx5_hv_vhca;
+struct mlx5_st;
#define MLX5_LOG_SW_ICM_BLOCK_SIZE(dev) (MLX5_CAP_DEV_MEM(dev, log_sw_icm_alloc_granularity))
#define MLX5_SW_ICM_BLOCK_SIZE(dev) (1 << MLX5_LOG_SW_ICM_BLOCK_SIZE(dev))
@@ -757,6 +759,7 @@ struct mlx5_core_dev {
u32 issi;
struct mlx5e_resources mlx5e_res;
struct mlx5_dm *dm;
+ struct mlx5_st *st;
struct mlx5_vxlan *vxlan;
struct mlx5_geneve *geneve;
struct {
@@ -1160,6 +1163,23 @@ int mlx5_dm_sw_icm_alloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
int mlx5_dm_sw_icm_dealloc(struct mlx5_core_dev *dev, enum mlx5_sw_icm_type type,
u64 length, u16 uid, phys_addr_t addr, u32 obj_id);
+#ifdef CONFIG_PCIE_TPH
+int mlx5_st_alloc_index(struct mlx5_core_dev *dev, enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index);
+int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index);
+#else
+static inline int mlx5_st_alloc_index(struct mlx5_core_dev *dev,
+ enum tph_mem_type mem_type,
+ unsigned int cpu_uid, u16 *st_index)
+{
+ return -EOPNOTSUPP;
+}
+static inline int mlx5_st_dealloc_index(struct mlx5_core_dev *dev, u16 st_index)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev);
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev);
@@ -1349,4 +1369,9 @@ enum {
};
bool mlx5_wc_support_get(struct mlx5_core_dev *mdev);
+
+static inline struct net *mlx5_core_net(struct mlx5_core_dev *dev)
+{
+ return devlink_net(priv_to_devlink(dev));
+}
#endif /* MLX5_DRIVER_H */
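
The steering-tag helpers follow the usual config-gated stub pattern, so callers need no #ifdef of their own: with CONFIG_PCIE_TPH disabled they simply see -EOPNOTSUPP. A hedged caller-side sketch; mdev and cpu_uid belong to the caller, the TPH_MEM_TYPE_VM constant is assumed to come from <linux/pci-tph.h>, and the fallback policy is illustrative:

	u16 st_index = MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX;
	int err;

	err = mlx5_st_alloc_index(mdev, TPH_MEM_TYPE_VM, cpu_uid, &st_index);
	if (err && err != -EOPNOTSUPP)
		return err;			/* real failure */

	/* Program the mkey: set pcie_tph_en/st_index only if err == 0. */

	if (!err)
		mlx5_st_dealloc_index(mdev, st_index);
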
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ed4130e49c27..8360d9011d4f 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -1871,7 +1871,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_280[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_at_2a0[0xb];
+ u8 reserved_at_2a0[0x7];
+ u8 mkey_pcie_tph[0x1];
+ u8 reserved_at_2a8[0x3];
u8 shampo[0x1];
u8 reserved_at_2ac[0x4];
u8 max_wqe_sz_rq[0x10];
@@ -4418,6 +4420,10 @@ enum {
MLX5_MKC_ACCESS_MODE_CROSSING = 0x6,
};
+enum {
+ MLX5_MKC_PCIE_TPH_NO_STEERING_TAG_INDEX = 0,
+};
+
struct mlx5_ifc_mkc_bits {
u8 reserved_at_0[0x1];
u8 free[0x1];
@@ -4469,7 +4475,11 @@ struct mlx5_ifc_mkc_bits {
u8 relaxed_ordering_read[0x1];
u8 log_page_size[0x6];
- u8 reserved_at_1e0[0x20];
+ u8 reserved_at_1e0[0x5];
+ u8 pcie_tph_en[0x1];
+ u8 pcie_tph_ph[0x2];
+ u8 pcie_tph_steering_tag_index[0x8];
+ u8 reserved_at_1f0[0x10];
};
struct mlx5_ifc_pkey_bits {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 00cd8415c0a0..349f0d9aad22 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1325,6 +1325,8 @@ static inline void get_page(struct page *page)
struct folio *folio = page_folio(page);
if (WARN_ON_ONCE(folio_test_slab(folio)))
return;
+ if (WARN_ON_ONCE(folio_test_large_kmalloc(folio)))
+ return;
folio_get(folio);
}
@@ -1419,7 +1421,7 @@ static inline void put_page(struct page *page)
{
struct folio *folio = page_folio(page);
- if (folio_test_slab(folio))
+ if (folio_test_slab(folio) || folio_test_large_kmalloc(folio))
return;
folio_put(folio);
@@ -1816,7 +1818,24 @@ static inline pmd_t folio_mk_pmd(struct folio *folio, pgprot_t pgprot)
{
return pmd_mkhuge(pfn_pmd(folio_pfn(folio), pgprot));
}
-#endif
+
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+/**
+ * folio_mk_pud - Create a PUD for this folio
+ * @folio: The folio to create a PUD for
+ * @pgprot: The page protection bits to use
+ *
+ * Create a page table entry for the first page of this folio.
+ * This is suitable for passing to set_pud_at().
+ *
+ * Return: A page table entry suitable for mapping this folio.
+ */
+static inline pud_t folio_mk_pud(struct folio *folio, pgprot_t pgprot)
+{
+ return pud_mkhuge(pfn_pud(folio_pfn(folio), pgprot));
+}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_MMU */
static inline bool folio_has_pincount(const struct folio *folio)
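
folio_mk_pud() rounds out the folio_mk_pte()/folio_mk_pmd() family for huge PUD mappings. A hedged sketch of the call pattern; the pudp/addr plumbing is assumed from a fault handler and locking is elided:

	pud_t entry = folio_mk_pud(folio, vma->vm_page_prot);

	/* Map the whole folio at a PUD-aligned address in one go. */
	set_pud_at(vma->vm_mm, addr & PUD_MASK, pudp, entry);
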
@@ -2150,13 +2169,13 @@ static inline int folio_expected_ref_count(const struct folio *folio)
const int order = folio_order(folio);
int ref_count = 0;
- if (WARN_ON_ONCE(folio_test_slab(folio)))
+ if (WARN_ON_ONCE(page_has_type(&folio->page) && !folio_test_hugetlb(folio)))
return 0;
if (folio_test_anon(folio)) {
/* One reference per page from the swapcache. */
ref_count += folio_test_swapcache(folio) << order;
- } else if (!((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS)) {
+ } else {
/* One reference per page from the pagecache. */
ref_count += !!folio->mapping << order;
/* One reference from PG_private. */
@@ -2547,7 +2566,7 @@ extern long change_protection(struct mmu_gather *tlb,
unsigned long end, unsigned long cp_flags);
extern int mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
struct vm_area_struct *vma, struct vm_area_struct **pprev,
- unsigned long start, unsigned long end, unsigned long newflags);
+ unsigned long start, unsigned long end, vm_flags_t newflags);
/*
* doesn't attempt to fault and will return short.
@@ -2692,13 +2711,6 @@ static inline pud_t pud_mkspecial(pud_t pud)
}
#endif /* CONFIG_ARCH_SUPPORTS_PUD_PFNMAP */
-#ifndef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline int pte_devmap(pte_t pte)
-{
- return 0;
-}
-#endif
-
extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
spinlock_t **ptl);
static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
@@ -3311,9 +3323,9 @@ extern void vm_stat_account(struct mm_struct *, vm_flags_t, long npages);
extern bool vma_is_special_mapping(const struct vm_area_struct *vma,
const struct vm_special_mapping *sm);
-extern struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
+struct vm_area_struct *_install_special_mapping(struct mm_struct *mm,
unsigned long addr, unsigned long len,
- unsigned long flags,
+ vm_flags_t vm_flags,
const struct vm_special_mapping *spec);
unsigned long randomize_stack_top(unsigned long stack_top);
@@ -3477,10 +3489,10 @@ static inline bool range_in_vma(struct vm_area_struct *vma,
}
#ifdef CONFIG_MMU
-pgprot_t vm_get_page_prot(unsigned long vm_flags);
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
void vma_set_page_prot(struct vm_area_struct *vma);
#else
-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
{
return __pgprot(0);
}
@@ -3517,9 +3529,9 @@ vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
- pfn_t pfn);
+ unsigned long pfn);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
- unsigned long addr, pfn_t pfn);
+ unsigned long addr, unsigned long pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
static inline vm_fault_t vmf_insert_page(struct vm_area_struct *vma,
@@ -4048,14 +4060,14 @@ unsigned long wp_shared_mapping_range(struct address_space *mapping,
#endif
#ifdef CONFIG_ANON_VMA_NAME
-int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in,
- struct anon_vma_name *anon_name);
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname);
#else
-static inline int
-madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
- unsigned long len_in, struct anon_vma_name *anon_name) {
- return 0;
+static inline
+int set_anon_vma_name(unsigned long addr, unsigned long size,
+ const char __user *uname)
+{
+ return -EINVAL;
}
#endif
@@ -4188,4 +4200,23 @@ static inline bool page_pool_page_is_pp(const struct page *page)
}
#endif
+#define PAGE_SNAPSHOT_FAITHFUL (1 << 0)
+#define PAGE_SNAPSHOT_PG_BUDDY (1 << 1)
+#define PAGE_SNAPSHOT_PG_IDLE (1 << 2)
+
+struct page_snapshot {
+ struct folio folio_snapshot;
+ struct page page_snapshot;
+ unsigned long pfn;
+ unsigned long idx;
+ unsigned long flags;
+};
+
+static inline bool snapshot_page_is_faithful(const struct page_snapshot *ps)
+{
+ return ps->flags & PAGE_SNAPSHOT_FAITHFUL;
+}
+
+void snapshot_page(struct page_snapshot *ps, const struct page *page);
+
#endif /* _LINUX_MM_H */
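
snapshot_page() copies the page and its folio into the caller-provided struct so diagnostics can run on a stable copy; PAGE_SNAPSHOT_FAITHFUL reports whether that copy was internally consistent, i.e. nothing raced mid-copy. A hedged debugging sketch:

static void report_page(const struct page *page)
{
	struct page_snapshot ps;

	snapshot_page(&ps, page);
	if (!snapshot_page_is_faithful(&ps))
		pr_warn("pfn %#lx: snapshot raced with page updates\n", ps.pfn);

	/* Inspect the copies, never the live page. */
	pr_info("pfn %#lx flags %pGp\n", ps.pfn, &ps.folio_snapshot.flags);
}
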
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 0f0662157066..08bc2442db93 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -105,7 +105,6 @@ struct page {
unsigned int order;
};
};
- /* See page-flags.h for PAGE_MAPPING_FLAGS */
struct address_space *mapping;
union {
pgoff_t __folio_index; /* Our offset within mapping. */
@@ -1086,7 +1085,7 @@ struct mm_struct {
unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ vm_flags_t def_flags;
/**
* @write_protect_seq: Locked when any thread is write
diff --git a/include/linux/mman.h b/include/linux/mman.h
index f4c6346a8fcd..de9e8e6229a4 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -137,7 +137,7 @@ static inline bool arch_validate_flags(unsigned long flags)
/*
* Combine the mmap "prot" argument into "vm_flags" used internally.
*/
-static inline unsigned long
+static inline vm_flags_t
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
return _calc_vm_trans(prot, PROT_READ, VM_READ ) |
@@ -149,7 +149,7 @@ calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
/*
* Combine the mmap "flags" argument into "vm_flags" used internally.
*/
-static inline unsigned long
+static inline vm_flags_t
calc_vm_flag_bits(struct file *file, unsigned long flags)
{
return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 5da384bd0a26..1f4f44951abe 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -309,6 +309,17 @@ void vma_mark_detached(struct vm_area_struct *vma);
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
+/*
+ * Locks the next VMA pointed to by the iterator. Confirms that the locked
+ * VMA has not been modified, and retries under mmap_lock protection if a
+ * modification was detected. Must be called from an RCU read-side section.
+ * Returns a valid locked VMA, NULL if there are no more VMAs, or -EINTR if
+ * the process was interrupted.
+ */
+struct vm_area_struct *lock_next_vma(struct mm_struct *mm,
+ struct vma_iterator *iter,
+ unsigned long address);
+
#else /* CONFIG_PER_VMA_LOCK */
static inline void mm_lock_seqcount_init(struct mm_struct *mm) {}
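
A hedged sketch of the iteration loop that comment describes; the vma_end_read() unlock and the position bookkeeping are assumptions based on other per-VMA-lock users:

	struct vm_area_struct *vma;
	unsigned long pos = 0;
	VMA_ITERATOR(vmi, mm, 0);

	rcu_read_lock();
	while ((vma = lock_next_vma(mm, &vmi, pos)) != NULL) {
		if (IS_ERR(vma))
			break;			/* -EINTR: interrupted, bail out */
		pos = vma->vm_end;		/* resume point for the next lookup */
		inspect(vma);			/* hypothetical */
		vma_end_read(vma);		/* drop the per-VMA lock */
	}
	rcu_read_unlock();
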
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index a0a3894900ed..14a45979cccc 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -89,6 +89,17 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
} \
unlikely(__ret_warn_once); \
})
+#define VM_WARN_ON_ONCE_VMA(cond, vma) ({ \
+ static bool __section(".data..once") __warned; \
+ int __ret_warn_once = !!(cond); \
+ \
+ if (unlikely(__ret_warn_once && !__warned)) { \
+ dump_vma(vma); \
+ __warned = true; \
+ WARN_ON(1); \
+ } \
+ unlikely(__ret_warn_once); \
+})
#define VM_WARN_ON_VMG(cond, vmg) ({ \
int __ret_warn = !!(cond); \
\
@@ -115,6 +126,7 @@ void vma_iter_dump_tree(const struct vma_iterator *vmi);
#define VM_WARN_ON_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_FOLIO(cond, folio) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_ONCE_MM(cond, mm) BUILD_BUG_ON_INVALID(cond)
+#define VM_WARN_ON_ONCE_VMA(cond, vma) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ON_VMG(cond, vmg) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN_ONCE(cond, format...) BUILD_BUG_ON_INVALID(cond)
#define VM_WARN(cond, format...) BUILD_BUG_ON_INVALID(cond)
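
Like its _FOLIO and _MM siblings, the new macro dumps the offending VMA on the first hit and still evaluates to the condition, so it can gate an early bail-out. Illustrative use:

	if (VM_WARN_ON_ONCE_VMA(vma->vm_start >= vma->vm_end, vma))
		return -EINVAL;		/* refuse to touch an inconsistent VMA */
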
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 283913d42d7b..0c5da9141983 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,19 +38,19 @@
#define NR_PAGE_ORDERS (MAX_PAGE_ORDER + 1)
/* Defines the order for the number of pages that have a migrate type. */
-#ifndef CONFIG_PAGE_BLOCK_ORDER
-#define PAGE_BLOCK_ORDER MAX_PAGE_ORDER
+#ifndef CONFIG_PAGE_BLOCK_MAX_ORDER
+#define PAGE_BLOCK_MAX_ORDER MAX_PAGE_ORDER
#else
-#define PAGE_BLOCK_ORDER CONFIG_PAGE_BLOCK_ORDER
-#endif /* CONFIG_PAGE_BLOCK_ORDER */
+#define PAGE_BLOCK_MAX_ORDER CONFIG_PAGE_BLOCK_MAX_ORDER
+#endif /* CONFIG_PAGE_BLOCK_MAX_ORDER */
/*
* The MAX_PAGE_ORDER, which defines the max order of pages to be allocated
- * by the buddy allocator, has to be larger or equal to the PAGE_BLOCK_ORDER,
+ * by the buddy allocator, has to be larger or equal to the PAGE_BLOCK_MAX_ORDER,
* which defines the order for the number of pages that can have a migrate type
*/
-#if (PAGE_BLOCK_ORDER > MAX_PAGE_ORDER)
-#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_ORDER
+#if (PAGE_BLOCK_MAX_ORDER > MAX_PAGE_ORDER)
+#error MAX_PAGE_ORDER must be >= PAGE_BLOCK_MAX_ORDER
#endif
/*
@@ -79,6 +79,9 @@ enum migratetype {
* __free_pageblock_cma() function.
*/
MIGRATE_CMA,
+ __MIGRATE_TYPE_END = MIGRATE_CMA,
+#else
+ __MIGRATE_TYPE_END = MIGRATE_HIGHATOMIC,
#endif
#ifdef CONFIG_MEMORY_ISOLATION
MIGRATE_ISOLATE, /* can't allocate from here */
@@ -92,8 +95,12 @@ extern const char * const migratetype_names[MIGRATE_TYPES];
#ifdef CONFIG_CMA
# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
# define is_migrate_cma_page(_page) (get_pageblock_migratetype(_page) == MIGRATE_CMA)
-# define is_migrate_cma_folio(folio, pfn) (MIGRATE_CMA == \
- get_pfnblock_flags_mask(&folio->page, pfn, MIGRATETYPE_MASK))
+/*
+ * __dump_folio() in mm/debug.c passes a folio pointer to on-stack struct folio,
+ * so folio_pfn() cannot be used and pfn is needed.
+ */
+# define is_migrate_cma_folio(folio, pfn) \
+ (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
#else
# define is_migrate_cma(migratetype) false
# define is_migrate_cma_page(_page) false
@@ -122,14 +129,12 @@ static inline bool migratetype_is_mergeable(int mt)
extern int page_group_by_mobility_disabled;
-#define MIGRATETYPE_MASK ((1UL << PB_migratetype_bits) - 1)
+#define get_pageblock_migratetype(page) \
+ get_pfnblock_migratetype(page, page_to_pfn(page))
-#define get_pageblock_migratetype(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), MIGRATETYPE_MASK)
+#define folio_migratetype(folio) \
+ get_pageblock_migratetype(&folio->page)
-#define folio_migratetype(folio) \
- get_pfnblock_flags_mask(&folio->page, folio_pfn(folio), \
- MIGRATETYPE_MASK)
struct free_area {
struct list_head free_list[MIGRATE_TYPES];
unsigned long nr_free;
@@ -201,7 +206,6 @@ enum node_stat_item {
NR_FILE_PAGES,
NR_FILE_DIRTY,
NR_WRITEBACK,
- NR_WRITEBACK_TEMP, /* Writeback using temporary buffers */
NR_SHMEM, /* shmem pages (included tmpfs/GEM pages) */
NR_SHMEM_THPS,
NR_SHMEM_PMDMAPPED,
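
Both rewritten macros now funnel through get_pfnblock_migratetype(); the folio variant keeps an explicit pfn argument only because __dump_folio() hands in an on-stack folio copy, for which folio_pfn() would compute garbage. Illustrative call sites:

	int mt = get_pageblock_migratetype(page);	/* pfn derived from page */
	bool cma = is_migrate_cma_folio(folio, pfn);	/* pfn supplied by caller */
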
diff --git a/include/linux/module.h b/include/linux/module.h
index a845cc81cc87..a7cac01d95e7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -14,6 +14,7 @@
#include <linux/buildid.h>
#include <linux/compiler.h>
#include <linux/cache.h>
+#include <linux/cleanup.h>
#include <linux/kmod.h>
#include <linux/init.h>
#include <linux/elf.h>
@@ -538,7 +539,7 @@ struct module {
struct trace_eval_map **trace_evals;
unsigned int num_trace_evals;
#endif
-#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#ifdef CONFIG_DYNAMIC_FTRACE
unsigned int num_ftrace_callsites;
unsigned long *ftrace_callsites;
#endif
@@ -1018,4 +1019,7 @@ static inline unsigned long find_kallsyms_symbol_value(struct module *mod,
#endif /* CONFIG_MODULES && CONFIG_KALLSYMS */
+/* Define __free(module_put) macro for struct module *. */
+DEFINE_FREE(module_put, struct module *, if (_T) module_put(_T))
+
#endif /* _LINUX_MODULE_H */
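
DEFINE_FREE(module_put, ...) hooks struct module * into <linux/cleanup.h> (hence the new include), so a module reference can be dropped automatically on every exit path; the if (_T) guard makes a NULL pointer a no-op. A hedged sketch; find_feature_module() and use_feature() are made up:

static int run_with_module(void)
{
	struct module *mod __free(module_put) =
		find_feature_module();		/* hypothetical, takes a ref */

	if (!mod)
		return -ENOENT;

	return use_feature(mod);	/* ref dropped on return, both paths */
}
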
diff --git a/include/linux/msi.h b/include/linux/msi.h
index 71e5cc830393..e5e86a8529fb 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -707,6 +707,7 @@ struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
struct msi_domain_info *info,
struct irq_domain *parent);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
+u32 pci_msi_map_rid_ctlr_node(struct pci_dev *pdev, struct device_node **node);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
void pci_msix_prepare_desc(struct irq_domain *domain, msi_alloc_info_t *arg,
struct msi_desc *desc);
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h
index 1b56796f6cb3..288ef765a44e 100644
--- a/include/linux/mtd/map.h
+++ b/include/linux/mtd/map.h
@@ -8,15 +8,15 @@
#ifndef __LINUX_MTD_MAP_H__
#define __LINUX_MTD_MAP_H__
-#include <linux/types.h>
-#include <linux/list.h>
-#include <linux/string.h>
#include <linux/bug.h>
-#include <linux/kernel.h>
#include <linux/io.h>
-
+#include <linux/ioport.h>
+#include <linux/string.h>
+#include <linux/types.h>
#include <linux/unaligned.h>
-#include <asm/barrier.h>
+
+struct device_node;
+struct module;
#ifdef CONFIG_MTD_MAP_BANK_WIDTH_1
#define map_bankwidth(map) 1
@@ -188,6 +188,7 @@ typedef union {
of living.
*/
+struct mtd_chip_driver;
struct map_info {
const char *name;
unsigned long size;
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 15eaa09da998..27a45bdab7ec 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -62,30 +62,33 @@
SPI_MEM_OP_NO_DUMMY, \
SPI_MEM_OP_NO_DATA)
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
SPI_MEM_OP_DATA_IN(len, buf, 1), \
- SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
- SPI_MEM_OP_ADDR(2, addr, 1), \
- SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_FAST_3A_1S_1S_1S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 1))
+ SPI_MEM_OP_DATA_IN(len, buf, 1), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_1D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x0d, 1), \
@@ -94,17 +97,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 1), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_2D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x3d, 1), \
@@ -113,18 +118,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(2, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
SPI_MEM_OP_DATA_IN(len, buf, 2), \
- SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_2S_2S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
SPI_MEM_OP_ADDR(3, addr, 2), \
SPI_MEM_OP_DUMMY(ndummy, 2), \
- SPI_MEM_OP_DATA_IN(len, buf, 2))
+ SPI_MEM_OP_DATA_IN(len, buf, 2), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_2D_2D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xbd, 1), \
@@ -133,17 +139,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 2), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(2, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_1S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
SPI_MEM_OP_ADDR(3, addr, 1), \
SPI_MEM_OP_DUMMY(ndummy, 1), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_1D_4D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0x6d, 1), \
@@ -152,18 +160,19 @@
SPI_MEM_DTR_OP_DATA_IN(len, buf, 4), \
SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, ...) \
+#define SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(2, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
SPI_MEM_OP_DATA_IN(len, buf, 4), \
- SPI_MEM_OP_MAX_FREQ(__VA_ARGS__ + 0))
+ SPI_MEM_OP_MAX_FREQ(freq))
-#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len) \
+#define SPINAND_PAGE_READ_FROM_CACHE_3A_1S_4S_4S_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
SPI_MEM_OP_ADDR(3, addr, 4), \
SPI_MEM_OP_DUMMY(ndummy, 4), \
- SPI_MEM_OP_DATA_IN(len, buf, 4))
+ SPI_MEM_OP_DATA_IN(len, buf, 4), \
+ SPI_MEM_OP_MAX_FREQ(freq))
#define SPINAND_PAGE_READ_FROM_CACHE_1S_4D_4D_OP(addr, ndummy, buf, len, freq) \
SPI_MEM_OP(SPI_MEM_OP_CMD(0xed, 1), \
@@ -484,6 +493,7 @@ struct spinand_user_otp {
* @op_variants.update_cache: variants of the update-cache operation
* @select_target: function used to select a target/die. Required only for
* multi-die chips
+ * @configure_chip: Align the chip configuration with the core settings
* @set_cont_read: enable/disable continuous cached reads
* @fact_otp: SPI NAND factory OTP info.
* @user_otp: SPI NAND user OTP info.
@@ -507,6 +517,7 @@ struct spinand_info {
} op_variants;
int (*select_target)(struct spinand_device *spinand,
unsigned int target);
+ int (*configure_chip)(struct spinand_device *spinand);
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
struct spinand_fact_otp fact_otp;
@@ -539,6 +550,9 @@ struct spinand_info {
#define SPINAND_SELECT_TARGET(__func) \
.select_target = __func
+#define SPINAND_CONFIGURE_CHIP(__configure_chip) \
+ .configure_chip = __configure_chip
+
#define SPINAND_CONT_READ(__set_cont_read) \
.set_cont_read = __set_cont_read
@@ -607,6 +621,7 @@ struct spinand_dirmap {
* passed in spi_mem_op be DMA-able, so we can't base the bufs on
* the stack
* @manufacturer: SPI NAND manufacturer information
+ * @configure_chip: Align the chip configuration with the core settings
* @cont_read_possible: Field filled by the core once the whole system
* configuration is known to tell whether continuous reads are
* suitable to use or not in general with this chip/configuration.
@@ -647,6 +662,7 @@ struct spinand_device {
const struct spinand_manufacturer *manufacturer;
void *priv;
+ int (*configure_chip)(struct spinand_device *spinand);
bool cont_read_possible;
int (*set_cont_read)(struct spinand_device *spinand,
bool enable);
@@ -723,7 +739,9 @@ int spinand_match_and_init(struct spinand_device *spinand,
enum spinand_readid_method rdid_method);
int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val);
int spinand_write_reg_op(struct spinand_device *spinand, u8 reg, u8 val);
+int spinand_write_enable_op(struct spinand_device *spinand);
int spinand_select_target(struct spinand_device *spinand, unsigned int target);
int spinand_wait(struct spinand_device *spinand, unsigned long initial_delay_us,
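
Every read-from-cache macro now takes an explicit maximum frequency instead of the optional __VA_ARGS__ + 0 trick, so a plain 0 keeps the controller default. Illustrative entry in a chip's op-variants table; the 104 MHz cap is made up:

static SPINAND_OP_VARIANTS(read_cache_variants,
		SPINAND_PAGE_READ_FROM_CACHE_1S_4S_4S_OP(0, 2, NULL, 0, 104 * HZ_PER_MHZ),
		SPINAND_PAGE_READ_FROM_CACHE_1S_1S_1S_OP(0, 1, NULL, 0, 0));
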
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h
index 562f92504f2b..c3f79c4be1cc 100644
--- a/include/linux/mtd/ubi.h
+++ b/include/linux/mtd/ubi.h
@@ -250,7 +250,6 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum);
int ubi_leb_map(struct ubi_volume_desc *desc, int lnum);
int ubi_is_mapped(struct ubi_volume_desc *desc, int lnum);
int ubi_sync(int ubi_num);
-int ubi_flush(int ubi_num, int vol_id, int lnum);
/*
* This function is the same as the 'ubi_leb_read()' function, but it does not
diff --git a/include/linux/node.h b/include/linux/node.h
index 2b7517892230..2c7529335b21 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -111,43 +111,64 @@ struct memory_block;
extern struct node *node_devices[];
#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
-void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context);
+void register_memory_blocks_under_node_hotplug(int nid, unsigned long start_pfn,
+ unsigned long end_pfn);
#else
-static inline void register_memory_blocks_under_node(int nid, unsigned long start_pfn,
- unsigned long end_pfn,
- enum meminit_context context)
+static inline void register_memory_blocks_under_node_hotplug(int nid,
+ unsigned long start_pfn,
+ unsigned long end_pfn)
+{
+}
+static inline void register_memory_blocks_under_nodes(void)
{
}
#endif
extern void unregister_node(struct node *node);
-#ifdef CONFIG_NUMA
-extern void node_dev_init(void);
-/* Core of the node registration - only memory hotplug should use this */
-extern int __register_one_node(int nid);
-
-/* Registers an online node */
-static inline int register_one_node(int nid)
-{
- int error = 0;
- if (node_online(nid)) {
- struct pglist_data *pgdat = NODE_DATA(nid);
- unsigned long start_pfn = pgdat->node_start_pfn;
- unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+struct node_notify {
+ int nid;
+};
- error = __register_one_node(nid);
- if (error)
- return error;
- register_memory_blocks_under_node(nid, start_pfn, end_pfn,
- MEMINIT_EARLY);
- }
+#define NODE_ADDING_FIRST_MEMORY (1<<0)
+#define NODE_ADDED_FIRST_MEMORY (1<<1)
+#define NODE_CANCEL_ADDING_FIRST_MEMORY (1<<2)
+#define NODE_REMOVING_LAST_MEMORY (1<<3)
+#define NODE_REMOVED_LAST_MEMORY (1<<4)
+#define NODE_CANCEL_REMOVING_LAST_MEMORY (1<<5)
- return error;
+#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_NUMA)
+extern int register_node_notifier(struct notifier_block *nb);
+extern void unregister_node_notifier(struct notifier_block *nb);
+extern int node_notify(unsigned long val, void *v);
+
+#define hotplug_node_notifier(fn, pri) ({ \
+ static __meminitdata struct notifier_block fn##_node_nb =\
+ { .notifier_call = fn, .priority = pri };\
+ register_node_notifier(&fn##_node_nb); \
+})
+#else
+static inline int register_node_notifier(struct notifier_block *nb)
+{
+ return 0;
+}
+static inline void unregister_node_notifier(struct notifier_block *nb)
+{
+}
+static inline int node_notify(unsigned long val, void *v)
+{
+ return 0;
+}
+static inline int hotplug_node_notifier(notifier_fn_t fn, int pri)
+{
+ return 0;
}
+#endif
+#ifdef CONFIG_NUMA
+extern void node_dev_init(void);
+/* Core of the node registration - only memory hotplug should use this */
+extern int register_one_node(int nid);
extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
@@ -160,10 +181,6 @@ extern int register_memory_node_under_compute_node(unsigned int mem_nid,
static inline void node_dev_init(void)
{
}
-static inline int __register_one_node(int nid)
-{
- return 0;
-}
static inline int register_one_node(int nid)
{
return 0;
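
The node notifier deliberately mirrors hotplug_memory_notifier(): subscribers receive a struct node_notify and switch on the NODE_* stage. A hedged subscriber sketch; the per-node cache helpers are made up:

static int my_node_callback(struct notifier_block *nb,
			    unsigned long action, void *arg)
{
	struct node_notify *nn = arg;

	switch (action) {
	case NODE_ADDED_FIRST_MEMORY:
		setup_per_node_cache(nn->nid);		/* hypothetical */
		break;
	case NODE_REMOVED_LAST_MEMORY:
		teardown_per_node_cache(nn->nid);	/* hypothetical */
		break;
	}
	return NOTIFY_OK;
}

/* Typically from an initcall: */
hotplug_node_notifier(my_node_callback, 0);
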
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index f08ae71585fa..7ad1f5c7407e 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -492,21 +492,9 @@ static __always_inline int num_node_state(enum node_states state)
static __always_inline int node_random(const nodemask_t *maskp)
{
#if defined(CONFIG_NUMA) && (MAX_NUMNODES > 1)
- int w, bit;
-
- w = nodes_weight(*maskp);
- switch (w) {
- case 0:
- bit = NUMA_NO_NODE;
- break;
- case 1:
- bit = first_node(*maskp);
- break;
- default:
- bit = find_nth_bit(maskp->bits, MAX_NUMNODES, get_random_u32_below(w));
- break;
- }
- return bit;
+ int node = find_random_bit(maskp->bits, MAX_NUMNODES);
+
+ return node < MAX_NUMNODES ? node : NUMA_NO_NODE;
#else
return 0;
#endif
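
The open-coded weight/first_node/find_nth_bit switch collapses into find_random_bit(), which already handles the empty and single-bit cases; the only remaining glue is translating an out-of-range result (empty mask) back to NUMA_NO_NODE.
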
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 6337ad4e5fe8..a480063c9cb1 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -54,6 +54,7 @@ extern struct irq_domain *of_msi_map_get_device_domain(struct device *dev,
u32 id,
u32 bus_token);
extern void of_msi_configure(struct device *dev, const struct device_node *np);
+extern u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in);
u32 of_msi_map_id(struct device *dev, struct device_node *msi_np, u32 id_in);
#else
static inline void of_irq_init(const struct of_device_id *matches)
@@ -100,6 +101,10 @@ static inline struct irq_domain *of_msi_map_get_device_domain(struct device *dev
static inline void of_msi_configure(struct device *dev, struct device_node *np)
{
}
+static inline u32 of_msi_xlate(struct device *dev, struct device_node **msi_np, u32 id_in)
+{
+ return id_in;
+}
static inline u32 of_msi_map_id(struct device *dev,
struct device_node *msi_np, u32 id_in)
{
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 0146daf34430..765f2778e264 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -90,8 +90,6 @@ struct padata_cpumask {
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
- * @reorder_work: work struct for reordering.
- * @lock: Reorder lock.
*/
struct parallel_data {
struct padata_shell *ps;
@@ -102,8 +100,6 @@ struct parallel_data {
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
- struct work_struct reorder_work;
- spinlock_t ____cacheline_aligned lock;
};
/**
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4fe5ee67535b..8e4d6eda8a8d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -167,8 +167,12 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
- /* non-lru isolated movable page */
- PG_isolated = PG_reclaim,
+#ifdef CONFIG_MIGRATION
+ /* movable_ops page that is isolated for migration */
+ PG_movable_ops_isolated = PG_reclaim,
+ /* this is a movable_ops page (for selected typed pages only) */
+ PG_movable_ops = PG_uptodate,
+#endif
/* Only valid for buddy pages. Used to track pages that are reported */
PG_reported = PG_uptodate,
@@ -691,15 +695,12 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
/*
* On an anonymous folio mapped into a user virtual memory area,
* folio->mapping points to its anon_vma, not to a struct address_space;
- * with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
+ * with the FOLIO_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
- * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
+ * On an anonymous folio in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
+ * the FOLIO_MAPPING_ANON_KSM bit may be set along with the FOLIO_MAPPING_ANON
* bit; and then folio->mapping points, not to an anon_vma, but to a private
- * structure which KSM associates with that merged page. See ksm.h.
- *
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
- * page and then folio->mapping points to a struct movable_operations.
+ * structure which KSM associates with that merged folio. See ksm.h.
*
* Please note that, confusingly, "folio_mapping" refers to the inode
* address_space which maps the folio from disk; whereas "folio_mapped"
@@ -712,50 +713,27 @@ PAGEFLAG_FALSE(VmemmapSelfHosted, vmemmap_self_hosted)
* false before calling the following functions (e.g., folio_test_anon).
* See mm/slab.h.
*/
-#define PAGE_MAPPING_ANON 0x1
-#define PAGE_MAPPING_MOVABLE 0x2
-#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
-
-static __always_inline bool folio_mapping_flags(const struct folio *folio)
-{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) != 0;
-}
-
-static __always_inline bool PageMappingFlags(const struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
-}
+#define FOLIO_MAPPING_ANON 0x1
+#define FOLIO_MAPPING_ANON_KSM 0x2
+#define FOLIO_MAPPING_KSM (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
+#define FOLIO_MAPPING_FLAGS (FOLIO_MAPPING_ANON | FOLIO_MAPPING_ANON_KSM)
static __always_inline bool folio_test_anon(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_ANON) != 0;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_ANON) != 0;
}
static __always_inline bool PageAnonNotKsm(const struct page *page)
{
unsigned long flags = (unsigned long)page_folio(page)->mapping;
- return (flags & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_ANON;
+ return (flags & FOLIO_MAPPING_FLAGS) == FOLIO_MAPPING_ANON;
}
static __always_inline bool PageAnon(const struct page *page)
{
return folio_test_anon(page_folio(page));
}
-
-static __always_inline bool __folio_test_movable(const struct folio *folio)
-{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
-}
-
-static __always_inline bool __PageMovable(const struct page *page)
-{
- return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_MOVABLE;
-}
-
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -765,8 +743,8 @@ static __always_inline bool __PageMovable(const struct page *page)
*/
static __always_inline bool folio_test_ksm(const struct folio *folio)
{
- return ((unsigned long)folio->mapping & PAGE_MAPPING_FLAGS) ==
- PAGE_MAPPING_KSM;
+ return ((unsigned long)folio->mapping & FOLIO_MAPPING_FLAGS) ==
+ FOLIO_MAPPING_KSM;
}
#else
FOLIO_TEST_FLAG_FALSE(ksm)
@@ -1137,7 +1115,53 @@ static inline bool folio_contain_hwpoisoned_page(struct folio *folio)
bool is_free_buddy_page(const struct page *page);
-PAGEFLAG(Isolated, isolated, PF_ANY);
+#ifdef CONFIG_MIGRATION
+/*
+ * This page is migratable through movable_ops (for selected typed pages
+ * only).
+ *
+ * Page migration of such pages might fail, for example, if the page is
+ * already isolated by somebody else, or if the page is about to get freed.
+ *
+ * While a subsystem might set selected typed pages that support page migration
+ * as being movable through movable_ops, it must never clear this flag.
+ *
+ * This flag is only cleared when the page is freed back to the buddy.
+ *
+ * Only selected page types support this flag (see page_movable_ops()) and
+ * the flag might be used in other contexts for other pages. Always use
+ * page_has_movable_ops() instead.
+ */
+TESTPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+SETPAGEFLAG(MovableOps, movable_ops, PF_NO_TAIL);
+/*
+ * A movable_ops page has this flag set while it is isolated for migration.
+ * This flag primarily protects against concurrent migration attempts.
+ *
+ * Once migration ended (success or failure), the flag is cleared. The
+ * flag is managed by the migration core.
+ */
+PAGEFLAG(MovableOpsIsolated, movable_ops_isolated, PF_NO_TAIL);
+#else /* !CONFIG_MIGRATION */
+TESTPAGEFLAG_FALSE(MovableOps, movable_ops);
+SETPAGEFLAG_NOOP(MovableOps, movable_ops);
+PAGEFLAG_FALSE(MovableOpsIsolated, movable_ops_isolated);
+#endif /* CONFIG_MIGRATION */
+
+/**
+ * page_has_movable_ops - test for a movable_ops page
+ * @page: The page to test.
+ *
+ * Test whether this is a movable_ops page. Such pages will stay that
+ * way until freed.
+ *
+ * Returns true if this is a movable_ops page, otherwise false.
+ */
+static inline bool page_has_movable_ops(const struct page *page)
+{
+ return PageMovableOps(page) &&
+ (PageOffline(page) || PageZsmalloc(page));
+}
static __always_inline int PageAnonExclusive(const struct page *page)
{
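
A hedged sketch (not the actual migration-core code) of the isolation
protocol the comments above describe; the locking and atomicity of the
real implementation are elided:

/* Only movable_ops pages may be isolated, and PG_movable_ops_isolated
 * rejects concurrent isolation attempts. */
static bool example_try_isolate(struct page *page)
{
	if (!page_has_movable_ops(page))
		return false;
	if (PageMovableOpsIsolated(page))
		return false;	/* already isolated by somebody else */
	SetPageMovableOpsIsolated(page);
	return true;
}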
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 898bb788243b..3e2f960e166c 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -11,6 +11,12 @@ static inline bool is_migrate_isolate(int migratetype)
{
return migratetype == MIGRATE_ISOLATE;
}
+#define get_pageblock_isolate(page) \
+ get_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define clear_pageblock_isolate(page) \
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
+#define set_pageblock_isolate(page) \
+ set_pfnblock_bit(page, page_to_pfn(page), PB_migrate_isolate)
#else
static inline bool is_migrate_isolate_page(struct page *page)
{
@@ -20,22 +26,45 @@ static inline bool is_migrate_isolate(int migratetype)
{
return false;
}
+static inline bool get_pageblock_isolate(struct page *page)
+{
+ return false;
+}
+static inline void clear_pageblock_isolate(struct page *page)
+{
+}
+static inline void set_pageblock_isolate(struct page *page)
+{
+}
#endif
-#define MEMORY_OFFLINE 0x1
-#define REPORT_FAILURE 0x2
+/*
+ * Pageblock isolation modes:
+ * PB_ISOLATE_MODE_MEM_OFFLINE - isolate to offline (!allocate) memory
+ * e.g., skip over PageHWPoison() pages and
+ * PageOffline() pages. Unmovable pages will be
+ * reported in this mode.
+ * PB_ISOLATE_MODE_CMA_ALLOC - isolate for CMA allocations
+ * PB_ISOLATE_MODE_OTHER - isolate for other purposes
+ */
+enum pb_isolate_mode {
+ PB_ISOLATE_MODE_MEM_OFFLINE,
+ PB_ISOLATE_MODE_CMA_ALLOC,
+ PB_ISOLATE_MODE_OTHER,
+};
-void set_pageblock_migratetype(struct page *page, int migratetype);
+void __meminit init_pageblock_migratetype(struct page *page,
+ enum migratetype migratetype,
+ bool isolate);
-bool move_freepages_block_isolate(struct zone *zone, struct page *page,
- int migratetype);
+bool pageblock_isolate_and_move_free_pages(struct zone *zone, struct page *page);
+bool pageblock_unisolate_and_move_free_pages(struct zone *zone, struct page *page);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags);
+ enum pb_isolate_mode mode);
-void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype);
+void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- int isol_flags);
+ enum pb_isolate_mode mode);
#endif
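
A minimal sketch of the new call sequence with enum pb_isolate_mode,
assuming a CMA-style caller; migration of the in-range pages is elided:

static int example_isolate_range(unsigned long start_pfn,
				 unsigned long end_pfn)
{
	int ret;

	ret = start_isolate_page_range(start_pfn, end_pfn,
				       PB_ISOLATE_MODE_CMA_ALLOC);
	if (ret)
		return ret;
	/* ... migrate/allocate within the isolated range ... */
	undo_isolate_page_range(start_pfn, end_pfn);
	return 0;
}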
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index debdc25f08b9..3328357f6dba 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
extern void __split_page_owner(struct page *page, int old_order,
int new_order);
extern void __folio_copy_owner(struct folio *newfolio, struct folio *old);
-extern void __set_page_owner_migrate_reason(struct page *page, int reason);
+extern void __folio_set_owner_migrate_reason(struct folio *folio, int reason);
extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
@@ -43,10 +43,10 @@ static inline void folio_copy_owner(struct folio *newfolio, struct folio *old)
if (static_branch_unlikely(&page_owner_inited))
__folio_copy_owner(newfolio, old);
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
if (static_branch_unlikely(&page_owner_inited))
- __set_page_owner_migrate_reason(page, reason);
+ __folio_set_owner_migrate_reason(folio, reason);
}
static inline void dump_page_owner(const struct page *page)
{
@@ -68,7 +68,7 @@ static inline void split_page_owner(struct page *page, int old_order,
static inline void folio_copy_owner(struct folio *newfolio, struct folio *folio)
{
}
-static inline void set_page_owner_migrate_reason(struct page *page, int reason)
+static inline void folio_set_owner_migrate_reason(struct folio *folio, int reason)
{
}
static inline void dump_page_owner(const struct page *page)
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index e73a4292ef02..6a44be0f39f4 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -19,15 +19,33 @@ enum pageblock_bits {
PB_migrate,
PB_migrate_end = PB_migrate + PB_migratetype_bits - 1,
/* 3 bits required for migrate types */
- PB_migrate_skip,/* If set the block is skipped by compaction */
+ PB_compact_skip,/* If set the block is skipped by compaction */
+#ifdef CONFIG_MEMORY_ISOLATION
+ /*
+ * Pageblock isolation is represented with a separate bit, so that
+ * the migratetype of a block is not overwritten by isolation.
+ */
+ PB_migrate_isolate, /* If set the block is isolated */
+#endif
/*
* Assume the bits will always align on a word. If this assumption
* changes then get/set pageblock needs updating.
*/
- NR_PAGEBLOCK_BITS
+ __NR_PAGEBLOCK_BITS
};
+#define NR_PAGEBLOCK_BITS (roundup_pow_of_two(__NR_PAGEBLOCK_BITS))
+
+#define MIGRATETYPE_MASK ((1UL << (PB_migrate_end + 1)) - 1)
+
+#ifdef CONFIG_MEMORY_ISOLATION
+#define MIGRATETYPE_AND_ISO_MASK \
+ (((1UL << (PB_migrate_end + 1)) - 1) | BIT(PB_migrate_isolate))
+#else
+#define MIGRATETYPE_AND_ISO_MASK MIGRATETYPE_MASK
+#endif
+
#if defined(CONFIG_HUGETLB_PAGE)
#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
@@ -41,18 +59,18 @@ extern unsigned int pageblock_order;
* Huge pages are a constant size, but don't exceed the maximum allocation
* granularity.
*/
-#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_ORDER)
+#define pageblock_order MIN_T(unsigned int, HUGETLB_PAGE_ORDER, PAGE_BLOCK_MAX_ORDER)
#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
#elif defined(CONFIG_TRANSPARENT_HUGEPAGE)
-#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_ORDER)
+#define pageblock_order MIN_T(unsigned int, HPAGE_PMD_ORDER, PAGE_BLOCK_MAX_ORDER)
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
-/* If huge pages are not used, group by PAGE_BLOCK_ORDER */
-#define pageblock_order PAGE_BLOCK_ORDER
+/* If huge pages are not used, group by PAGE_BLOCK_MAX_ORDER */
+#define pageblock_order PAGE_BLOCK_MAX_ORDER
#endif /* CONFIG_HUGETLB_PAGE */
@@ -65,27 +83,23 @@ extern unsigned int pageblock_order;
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(const struct page *page,
- unsigned long pfn,
- unsigned long mask);
-
-void set_pfnblock_flags_mask(struct page *page,
- unsigned long flags,
- unsigned long pfn,
- unsigned long mask);
+enum migratetype get_pfnblock_migratetype(const struct page *page,
+ unsigned long pfn);
+bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void set_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
+void clear_pfnblock_bit(const struct page *page, unsigned long pfn,
+ enum pageblock_bits pb_bit);
/* Declarations for getting and setting flags. See mm/page_alloc.c */
#ifdef CONFIG_COMPACTION
#define get_pageblock_skip(page) \
- get_pfnblock_flags_mask(page, page_to_pfn(page), \
- (1 << (PB_migrate_skip)))
+ get_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define clear_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, 0, page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ clear_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#define set_pageblock_skip(page) \
- set_pfnblock_flags_mask(page, (1 << PB_migrate_skip), \
- page_to_pfn(page), \
- (1 << PB_migrate_skip))
+ set_pfnblock_bit(page, page_to_pfn(page), PB_compact_skip)
#else
static inline bool get_pageblock_skip(struct page *page)
{
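
For illustration, how a former mask-based call site maps onto the new
single-bit helpers (a sketch; PB_compact_skip is the renamed bit):

static void example_mark_skip(struct page *page)
{
	unsigned long pfn = page_to_pfn(page);

	/* Was: set_pfnblock_flags_mask(page, 1 << PB_migrate_skip, ...) */
	if (!get_pfnblock_bit(page, pfn, PB_compact_skip))
		set_pfnblock_bit(page, pfn, PB_compact_skip);
}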
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index ce2bcdcadb73..12a12dae727d 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -502,7 +502,7 @@ static inline pgoff_t mapping_align_index(struct address_space *mapping,
static inline bool mapping_large_folio_support(struct address_space *mapping)
{
/* AS_FOLIO_ORDER is only reasonable for pagecache folios */
- VM_WARN_ONCE((unsigned long)mapping & PAGE_MAPPING_ANON,
+ VM_WARN_ONCE((unsigned long)mapping & FOLIO_MAPPING_ANON,
"Anonymous mapping always supports large folio");
return mapping_max_folio_order(mapping) > 0;
@@ -905,7 +905,8 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
* @mapping: target address_space
* @index: the page index
*
- * Same as grab_cache_page(), but do not wait if the page is unavailable.
+ * Returns the locked page at the given index in the given cache, creating
+ * it if needed, but does not wait if the page is locked or to reclaim memory.
* This is intended for speculative data generators, where the data can
* be regenerated if the page couldn't be grabbed. This routine should
* be safe to call while holding the lock for another page.
@@ -969,15 +970,6 @@ unsigned filemap_get_folios_contig(struct address_space *mapping,
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
pgoff_t end, xa_mark_t tag, struct folio_batch *fbatch);
-/*
- * Returns locked page at given index in given cache, creating it if needed.
- */
-static inline struct page *grab_cache_page(struct address_space *mapping,
- pgoff_t index)
-{
- return find_or_create_page(mapping, index, mapping_gfp_mask(mapping));
-}
-
struct folio *read_cache_folio(struct address_space *, pgoff_t index,
filler_t *filler, struct file *file);
struct folio *mapping_read_folio_gfp(struct address_space *, pgoff_t index,
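
With grab_cache_page() gone, remaining callers open-code the helper it
wrapped; a sketch of the one-line conversion:

static struct page *example_get_page(struct address_space *mapping,
				     pgoff_t index)
{
	/* Former grab_cache_page(mapping, index) call site: */
	return find_or_create_page(mapping, index,
				   mapping_gfp_mask(mapping));
}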
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index 9700a29f8afb..682472c15495 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -14,6 +14,8 @@ enum page_walk_lock {
PGWALK_WRLOCK = 1,
/* vma is expected to be already write-locked during the walk */
PGWALK_WRLOCK_VERIFY = 2,
+ /* vma is expected to be already read-locked during the walk */
+ PGWALK_VMA_RDLOCK_VERIFY = 3,
};
/**
@@ -129,10 +131,9 @@ struct mm_walk {
int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
-int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
- unsigned long end, const struct mm_walk_ops *ops,
- pgd_t *pgd,
- void *private);
+int walk_kernel_page_table_range(unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd, void *private);
int walk_page_range_vma(struct vm_area_struct *vma, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
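
A hedged sketch of the renamed kernel-table walker; the callback is a
placeholder, and passing a NULL pgd (walk init_mm's tables) is an
assumption about the helper's contract:

static int example_pte_entry(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	return 0;	/* placeholder: inspect *pte here */
}

static const struct mm_walk_ops example_ops = {
	.pte_entry = example_pte_entry,
};

static int example_walk(unsigned long start, unsigned long end)
{
	return walk_kernel_page_table_range(start, end, &example_ops,
					    NULL, NULL);
}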
diff --git a/include/linux/panic.h b/include/linux/panic.h
index 8f2b5d92ac05..7be742628c25 100644
--- a/include/linux/panic.h
+++ b/include/linux/panic.h
@@ -3,6 +3,7 @@
#define _LINUX_PANIC_H
#include <linux/compiler_attributes.h>
+#include <linux/stdarg.h>
#include <linux/types.h>
struct pt_regs;
@@ -10,6 +11,8 @@ struct pt_regs;
extern long (*panic_blink)(int state);
__printf(1, 2)
void panic(const char *fmt, ...) __noreturn __cold;
+__printf(1, 0)
+void vpanic(const char *fmt, va_list args) __noreturn __cold;
void nmi_panic(struct pt_regs *regs, const char *msg);
void check_panic_on_warn(const char *origin);
extern void oops_enter(void);
diff --git a/include/linux/pci-tph.h b/include/linux/pci-tph.h
index c3e806c13d64..9e4e331b1603 100644
--- a/include/linux/pci-tph.h
+++ b/include/linux/pci-tph.h
@@ -28,6 +28,7 @@ int pcie_tph_get_cpu_st(struct pci_dev *dev,
unsigned int cpu_uid, u16 *tag);
void pcie_disable_tph(struct pci_dev *pdev);
int pcie_enable_tph(struct pci_dev *pdev, int mode);
+u16 pcie_tph_get_st_table_size(struct pci_dev *pdev);
#else
static inline int pcie_tph_set_st_entry(struct pci_dev *pdev,
unsigned int index, u16 tag)
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index c16cdeaa505e..12d90360f6db 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -63,14 +63,15 @@
* 1. The symbol must be globally unique, even the static ones.
* 2. Static percpu variables cannot be defined inside a function.
*
- * Archs which need weak percpu definitions should define
- * ARCH_NEEDS_WEAK_PER_CPU in asm/percpu.h when necessary.
+ * Archs which need weak percpu definitions should set
+ * CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU when necessary.
*
* To ensure that the generic code observes the above two
* restrictions, if CONFIG_DEBUG_FORCE_WEAK_PER_CPU is set weak
* definition is used for all cases.
*/
-#if defined(ARCH_NEEDS_WEAK_PER_CPU) || defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
+#if (defined(CONFIG_ARCH_MODULE_NEEDS_WEAK_PER_CPU) && defined(MODULE)) || \
+ defined(CONFIG_DEBUG_FORCE_WEAK_PER_CPU)
/*
* __pcpu_scope_* dummy variable is used to enforce scope. It
* receives the static modifier when it's used in front of
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 14bc053c53d8..b90ca0b6c331 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -4,15 +4,6 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-
-/*
- * pfn_t: encapsulates a page-frame number that is optionally backed
- * by memmap (struct page). Whether a pfn_t has a 'struct page'
- * backing is indicated by flags in the high bits of the value.
- */
-typedef struct {
- u64 val;
-} pfn_t;
#endif
#define PFN_ALIGN(x) (((unsigned long)(x) + (PAGE_SIZE - 1)) & PAGE_MASK)
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
deleted file mode 100644
index 2d9148221e9a..000000000000
--- a/include/linux/pfn_t.h
+++ /dev/null
@@ -1,131 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PFN_T_H_
-#define _LINUX_PFN_T_H_
-#include <linux/mm.h>
-
-/*
- * PFN_FLAGS_MASK - mask of all the possible valid pfn_t flags
- * PFN_SG_CHAIN - pfn is a pointer to the next scatterlist entry
- * PFN_SG_LAST - pfn references a page and is the last scatterlist entry
- * PFN_DEV - pfn is not covered by system memmap by default
- * PFN_MAP - pfn has a dynamic page mapping established by a device driver
- * PFN_SPECIAL - for CONFIG_FS_DAX_LIMITED builds to allow XIP, but not
- * get_user_pages
- */
-#define PFN_FLAGS_MASK (((u64) (~PAGE_MASK)) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
-#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
-#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
-#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
-#define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5))
-
-#define PFN_FLAGS_TRACE \
- { PFN_SPECIAL, "SPECIAL" }, \
- { PFN_SG_CHAIN, "SG_CHAIN" }, \
- { PFN_SG_LAST, "SG_LAST" }, \
- { PFN_DEV, "DEV" }, \
- { PFN_MAP, "MAP" }
-
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
-{
- pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
-
- return pfn_t;
-}
-
-/* a default pfn to pfn_t conversion assumes that @pfn is pfn_valid() */
-static inline pfn_t pfn_to_pfn_t(unsigned long pfn)
-{
- return __pfn_to_pfn_t(pfn, 0);
-}
-
-static inline pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
-{
- return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
-}
-
-static inline bool pfn_t_has_page(pfn_t pfn)
-{
- return (pfn.val & PFN_MAP) == PFN_MAP || (pfn.val & PFN_DEV) == 0;
-}
-
-static inline unsigned long pfn_t_to_pfn(pfn_t pfn)
-{
- return pfn.val & ~PFN_FLAGS_MASK;
-}
-
-static inline struct page *pfn_t_to_page(pfn_t pfn)
-{
- if (pfn_t_has_page(pfn))
- return pfn_to_page(pfn_t_to_pfn(pfn));
- return NULL;
-}
-
-static inline phys_addr_t pfn_t_to_phys(pfn_t pfn)
-{
- return PFN_PHYS(pfn_t_to_pfn(pfn));
-}
-
-static inline pfn_t page_to_pfn_t(struct page *page)
-{
- return pfn_to_pfn_t(page_to_pfn(page));
-}
-
-static inline int pfn_t_valid(pfn_t pfn)
-{
- return pfn_valid(pfn_t_to_pfn(pfn));
-}
-
-#ifdef CONFIG_MMU
-static inline pte_t pfn_t_pte(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pte(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-static inline pmd_t pfn_t_pmd(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pmd(pfn_t_to_pfn(pfn), pgprot);
-}
-
-#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
-static inline pud_t pfn_t_pud(pfn_t pfn, pgprot_t pgprot)
-{
- return pfn_pud(pfn_t_to_pfn(pfn), pgprot);
-}
-#endif
-#endif
-
-#ifdef CONFIG_ARCH_HAS_PTE_DEVMAP
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- const u64 flags = PFN_DEV|PFN_MAP;
-
- return (pfn.val & flags) == flags;
-}
-#else
-static inline bool pfn_t_devmap(pfn_t pfn)
-{
- return false;
-}
-pte_t pte_mkdevmap(pte_t pte);
-pmd_t pmd_mkdevmap(pmd_t pmd);
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
- defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
-pud_t pud_mkdevmap(pud_t pud);
-#endif
-#endif /* CONFIG_ARCH_HAS_PTE_DEVMAP */
-
-#ifdef CONFIG_ARCH_HAS_PTE_SPECIAL
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL;
-}
-#else
-static inline bool pfn_t_special(pfn_t pfn)
-{
- return false;
-}
-#endif /* CONFIG_ARCH_HAS_PTE_SPECIAL */
-#endif /* _LINUX_PFN_T_H_ */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 0b6e1f781d86..e3b99920be05 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -456,6 +456,17 @@ static inline bool arch_has_hw_pte_young(void)
}
#endif
+#ifndef exec_folio_order
+/*
+ * Returns preferred minimum folio order for executable file-backed memory. Must
+ * be in range [0, PMD_ORDER). Default to order-0.
+ */
+static inline unsigned int exec_folio_order(void)
+{
+ return 0;
+}
+#endif
+
#ifndef arch_check_zapped_pte
static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
pte_t pte)
@@ -1320,7 +1331,9 @@ static inline pte_t ptep_modify_prot_start(struct vm_area_struct *vma,
/*
* Commit an update to a pte, leaving any hardware-controlled bits in
- * the PTE unmodified.
+ * the PTE unmodified. The pte returned from ptep_modify_prot_start() may
+ * additionally have young and/or dirty bits set where previously they were not,
+ * so the updated pte may have these additional changes.
*/
static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr,
@@ -1329,6 +1342,86 @@ static inline void ptep_modify_prot_commit(struct vm_area_struct *vma,
__ptep_modify_prot_commit(vma, addr, ptep, pte);
}
#endif /* __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION */
+
+/**
+ * modify_prot_start_ptes - Start a pte protection read-modify-write transaction
+ * over a batch of ptes, which protects against asynchronous hardware
+ * modifications to the ptes. The intention is not to prevent the hardware from
+ * making pte updates, but to prevent any updates it may make from being lost.
+ * Please see the comment above ptep_modify_prot_start() for full description.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_start(), collecting the a/d bits from each pte
+ * in the batch.
+ *
+ * Note that PTE bits in the PTE batch besides the PFN can differ.
+ *
+ * Context: The caller holds the page table lock. The PTEs map consecutive
+ * pages that belong to the same folio. All other PTE bits must be identical for
+ * all PTEs in the batch except for young and dirty bits. The PTEs are all in
+ * the same PMD.
+ */
+#ifndef modify_prot_start_ptes
+static inline pte_t modify_prot_start_ptes(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep, unsigned int nr)
+{
+ pte_t pte, tmp_pte;
+
+ pte = ptep_modify_prot_start(vma, addr, ptep);
+ while (--nr) {
+ ptep++;
+ addr += PAGE_SIZE;
+ tmp_pte = ptep_modify_prot_start(vma, addr, ptep);
+ if (pte_dirty(tmp_pte))
+ pte = pte_mkdirty(pte);
+ if (pte_young(tmp_pte))
+ pte = pte_mkyoung(pte);
+ }
+ return pte;
+}
+#endif
+
+/**
+ * modify_prot_commit_ptes - Commit an update to a batch of ptes, leaving any
+ * hardware-controlled bits in the PTE unmodified.
+ *
+ * @vma: The virtual memory area the pages are mapped into.
+ * @addr: Address the first page is mapped at.
+ * @ptep: Page table pointer for the first entry.
+ * @old_pte: Old page table entry (for the first entry) which is now cleared.
+ * @pte: New page table entry to be set.
+ * @nr: Number of entries.
+ *
+ * May be overridden by the architecture; otherwise, implemented as a simple
+ * loop over ptep_modify_prot_commit().
+ *
+ * Context: The caller holds the page table lock. The PTEs are all in the same
+ * PMD. On exit, the set ptes in the batch map the same folio. The ptes set by
+ * ptep_modify_prot_start() may additionally have young and/or dirty bits set
+ * where previously they were not, so the updated ptes may have these
+ * additional changes.
+ */
+#ifndef modify_prot_commit_ptes
+static inline void modify_prot_commit_ptes(struct vm_area_struct *vma, unsigned long addr,
+ pte_t *ptep, pte_t old_pte, pte_t pte, unsigned int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; ++i, ++ptep, addr += PAGE_SIZE) {
+ ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
+
+ /* Advance PFN only, set same prot */
+ old_pte = pte_next_pfn(old_pte);
+ pte = pte_next_pfn(pte);
+ }
+}
+#endif
+
#endif /* CONFIG_MMU */
/*
@@ -1632,21 +1725,6 @@ static inline int pud_write(pud_t pud)
}
#endif /* pud_write */
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
static inline int pud_trans_huge(pud_t pud)
@@ -1661,7 +1739,7 @@ static inline int pud_trans_unstable(pud_t *pud)
defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
pud_t pudval = READ_ONCE(*pud);
- if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ if (pud_none(pudval) || pud_trans_huge(pudval))
return 1;
if (unlikely(pud_bad(pudval))) {
pud_clear_bad(pud);
@@ -1901,8 +1979,8 @@ typedef unsigned int pgtbl_mod_mask;
* - It should contain a huge PFN, which points to a huge page larger than
* PAGE_SIZE of the platform. The PFN format isn't important here.
*
- * - It should cover all kinds of huge mappings (e.g., pXd_trans_huge(),
- * pXd_devmap(), or hugetlb mappings).
+ * - It should cover all kinds of huge mappings (i.e. pXd_trans_huge()
+ * or hugetlb mappings).
*/
#ifndef pgd_leaf
#define pgd_leaf(x) false
@@ -2005,7 +2083,7 @@ typedef unsigned int pgtbl_mod_mask;
* x: (yes) yes
*/
#define DECLARE_VM_GET_PAGE_PROT \
-pgprot_t vm_get_page_prot(unsigned long vm_flags) \
+pgprot_t vm_get_page_prot(vm_flags_t vm_flags) \
{ \
return protection_map[vm_flags & \
(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)]; \
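
Tying the batched helpers above together, a minimal sketch of the
intended read-modify-write pattern over nr PTEs of one folio (page
table lock held; the protection value is illustrative):

static void example_change_prot(struct vm_area_struct *vma,
				unsigned long addr, pte_t *ptep,
				unsigned int nr, pgprot_t newprot)
{
	pte_t old_pte, pte;

	old_pte = modify_prot_start_ptes(vma, addr, ptep, nr);
	pte = pte_modify(old_pte, newprot);
	modify_prot_commit_ptes(vma, addr, ptep, old_pte, pte, nr);
}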
diff --git a/include/linux/platform_data/emc2305.h b/include/linux/platform_data/emc2305.h
index 54d672dd6f7d..76043a97f975 100644
--- a/include/linux/platform_data/emc2305.h
+++ b/include/linux/platform_data/emc2305.h
@@ -9,14 +9,20 @@
* struct emc2305_platform_data - EMC2305 driver platform data
* @max_state: maximum cooling state of the cooling device;
* @pwm_num: number of active channels;
+ * @pwm_output_mask: PWM output mask
+ * @pwm_polarity_mask: PWM polarity mask
* @pwm_separate: separate PWM settings for every channel;
* @pwm_min: array of minimum PWM per channel;
+ * @pwm_freq: array of PWM frequency per channel
*/
struct emc2305_platform_data {
u8 max_state;
u8 pwm_num;
+ u8 pwm_output_mask;
+ u8 pwm_polarity_mask;
bool pwm_separate;
u8 pwm_min[EMC2305_PWM_MAX];
+ u16 pwm_freq[EMC2305_PWM_MAX];
};
#endif
diff --git a/include/linux/platform_data/video-pxafb.h b/include/linux/platform_data/video-pxafb.h
index 6333bac166a5..38c24c77ba43 100644
--- a/include/linux/platform_data/video-pxafb.h
+++ b/include/linux/platform_data/video-pxafb.h
@@ -150,7 +150,6 @@ struct pxafb_mach_info {
};
void pxa_set_fb_info(struct device *, struct pxafb_mach_info *);
-unsigned long pxafb_get_hsync_time(struct device *dev);
/* smartpanel related */
#define SMART_CMD_A0 (0x1 << 8)
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 0cca01b5607b..f21f806bfb38 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -232,7 +232,6 @@ struct power_supply;
/* Run-time specific power supply configuration */
struct power_supply_config {
- struct device_node *of_node;
struct fwnode_handle *fwnode;
/* Driver private data */
@@ -808,19 +807,10 @@ static inline void power_supply_put(struct power_supply *psy) {}
static inline struct power_supply *power_supply_get_by_name(const char *name)
{ return NULL; }
#endif
-#ifdef CONFIG_OF
-extern struct power_supply *power_supply_get_by_phandle(struct device_node *np,
- const char *property);
-extern struct power_supply *devm_power_supply_get_by_phandle(
+extern struct power_supply *power_supply_get_by_reference(struct fwnode_handle *fwnode,
+ const char *property);
+extern struct power_supply *devm_power_supply_get_by_reference(
struct device *dev, const char *property);
-#else /* !CONFIG_OF */
-static inline struct power_supply *
-power_supply_get_by_phandle(struct device_node *np, const char *property)
-{ return NULL; }
-static inline struct power_supply *
-devm_power_supply_get_by_phandle(struct device *dev, const char *property)
-{ return NULL; }
-#endif /* CONFIG_OF */
extern const enum power_supply_property power_supply_battery_info_properties[];
extern const size_t power_supply_battery_info_properties_size;
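
A sketch of a consumer converted from the OF-only phandle lookup to the
fwnode-based reference API; the property name is illustrative:

static struct power_supply *example_get_battery(struct device *dev)
{
	/* Was: devm_power_supply_get_by_phandle(dev, "monitored-battery") */
	return devm_power_supply_get_by_reference(dev, "monitored-battery");
}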
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 5b462029d03c..5d22b803f51e 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -154,6 +154,8 @@ int vprintk_emit(int facility, int level,
asmlinkage __printf(1, 0)
int vprintk(const char *fmt, va_list args);
+__printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args);
asmlinkage __printf(1, 2) __cold
int _printk(const char *fmt, ...);
@@ -214,6 +216,11 @@ int vprintk(const char *s, va_list args)
{
return 0;
}
+static inline __printf(1, 0)
+int vprintk_deferred(const char *fmt, va_list args)
+{
+ return 0;
+}
static inline __printf(1, 2) __cold
int _printk(const char *s, ...)
{
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index de1d24f19f76..f139377f4b31 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -27,6 +27,7 @@ enum {
PROC_ENTRY_proc_read_iter = 1U << 1,
PROC_ENTRY_proc_compat_ioctl = 1U << 2,
+ PROC_ENTRY_proc_lseek = 1U << 3,
PROC_ENTRY_FORCE_LOOKUP = 1U << 7,
};
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index cd7f0ae26615..bc90c3c7b5fd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -152,9 +152,7 @@ ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
-void ring_buffer_read_prepare_sync(void);
-void ring_buffer_read_start(struct ring_buffer_iter *iter);
+ring_buffer_read_start(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
struct ring_buffer_event *
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c4f4903b1088..20803fcb49a7 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -893,7 +893,7 @@ static inline int folio_try_share_anon_rmap_pmd(struct folio *folio,
* Called from mm/vmscan.c to handle paging out
*/
int folio_referenced(struct folio *, int is_locked,
- struct mem_cgroup *memcg, unsigned long *vm_flags);
+ struct mem_cgroup *memcg, vm_flags_t *vm_flags);
void try_to_migrate(struct folio *folio, enum ttu_flags flags);
void try_to_unmap(struct folio *, enum ttu_flags flags);
@@ -1025,7 +1025,7 @@ struct anon_vma *folio_lock_anon_vma_read(const struct folio *folio,
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
- unsigned long *vm_flags)
+ vm_flags_t *vm_flags)
{
*vm_flags = 0;
return 0;
diff --git a/include/linux/rv.h b/include/linux/rv.h
index 3452b5e4b29e..14410a42faef 100644
--- a/include/linux/rv.h
+++ b/include/linux/rv.h
@@ -7,9 +7,17 @@
#ifndef _LINUX_RV_H
#define _LINUX_RV_H
-#define MAX_DA_NAME_LEN 32
+#include <linux/types.h>
+#include <linux/list.h>
+
+#define MAX_DA_NAME_LEN 32
+#define MAX_DA_RETRY_RACING_EVENTS 3
#ifdef CONFIG_RV
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/array_size.h>
+
/*
* Deterministic automaton per-object variables.
*/
@@ -18,27 +26,72 @@ struct da_monitor {
unsigned int curr_state;
};
+#ifdef CONFIG_RV_LTL_MONITOR
+
/*
- * Per-task RV monitors count. Nowadays fixed in RV_PER_TASK_MONITORS.
- * If we find justification for more monitors, we can think about
- * adding more or developing a dynamic method. So far, none of
- * these are justified.
+ * In the future, if the number of atomic propositions or the size of the
+ * Buchi automaton grows, we can switch to dynamic allocation. For now, the code
+ * is simpler this way.
*/
-#define RV_PER_TASK_MONITORS 1
-#define RV_PER_TASK_MONITOR_INIT (RV_PER_TASK_MONITORS)
+#define RV_MAX_LTL_ATOM 32
+#define RV_MAX_BA_STATES 32
-/*
- * Futher monitor types are expected, so make this a union.
+/**
+ * struct ltl_monitor - A linear temporal logic runtime verification monitor
+ * @states: States in the Buchi automaton. As a Buchi automaton is a
+ * non-deterministic state machine, the monitor can be in multiple
+ * states simultaneously. This is a bitmask of all possible states.
+ * If this is zero, that means either:
+ * - The monitor has not started yet (e.g. because not all
+ * atomic propositions are known).
+ * - There is no possible state to be in. In other words, a
+ * violation of the LTL property is detected.
+ * @atoms: The values of atomic propositions.
+ * @unknown_atoms: Atomic propositions which are still unknown.
*/
+struct ltl_monitor {
+ DECLARE_BITMAP(states, RV_MAX_BA_STATES);
+ DECLARE_BITMAP(atoms, RV_MAX_LTL_ATOM);
+ DECLARE_BITMAP(unknown_atoms, RV_MAX_LTL_ATOM);
+};
+
+static inline bool rv_ltl_valid_state(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->states); ++i) {
+ if (mon->states[i])
+ return true;
+ }
+ return false;
+}
+
+static inline bool rv_ltl_all_atoms_known(struct ltl_monitor *mon)
+{
+ for (int i = 0; i < ARRAY_SIZE(mon->unknown_atoms); ++i) {
+ if (mon->unknown_atoms[i])
+ return false;
+ }
+ return true;
+}
+
+#else
+
+struct ltl_monitor {};
+
+#endif /* CONFIG_RV_LTL_MONITOR */
+
+#define RV_PER_TASK_MONITOR_INIT (CONFIG_RV_PER_TASK_MONITORS)
+
union rv_task_monitor {
- struct da_monitor da_mon;
+ struct da_monitor da_mon;
+ struct ltl_monitor ltl_mon;
};
#ifdef CONFIG_RV_REACTORS
struct rv_reactor {
const char *name;
const char *description;
- void (*react)(char *msg);
+ __printf(1, 2) void (*react)(const char *msg, ...);
+ struct list_head list;
};
#endif
@@ -50,8 +103,12 @@ struct rv_monitor {
void (*disable)(void);
void (*reset)(void);
#ifdef CONFIG_RV_REACTORS
- void (*react)(char *msg);
+ struct rv_reactor *reactor;
+ __printf(1, 2) void (*react)(const char *msg, ...);
#endif
+ struct list_head list;
+ struct rv_monitor *parent;
+ struct dentry *root_d;
};
bool rv_monitoring_on(void);
@@ -64,6 +121,11 @@ void rv_put_task_monitor_slot(int slot);
bool rv_reacting_on(void);
int rv_unregister_reactor(struct rv_reactor *reactor);
int rv_register_reactor(struct rv_reactor *reactor);
+#else
+static inline bool rv_reacting_on(void)
+{
+ return false;
+}
#endif /* CONFIG_RV_REACTORS */
#endif /* CONFIG_RV */
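
A hedged sketch of the violation check the ltl_monitor kernel-doc
describes (real monitors are generated code; this is illustrative):

static bool example_ltl_violation(struct ltl_monitor *mon)
{
	/* No reachable Buchi state once every atom is known means the
	 * LTL property was violated. */
	return rv_ltl_all_atoms_known(mon) && !rv_ltl_valid_state(mon);
}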
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 40d2fa90df42..a9693f179c58 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -340,9 +340,11 @@ extern void io_schedule_finish(int token);
extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
-/* wrapper function to trace from this header file */
+/* wrapper functions to trace from this header file */
DECLARE_TRACEPOINT(sched_set_state_tp);
extern void __trace_set_current_state(int state_value);
+DECLARE_TRACEPOINT(sched_set_need_resched_tp);
+extern void __trace_set_need_resched(struct task_struct *curr, int tif);
/**
* struct prev_cputime - snapshot of system and user cputime
@@ -1634,12 +1636,10 @@ struct task_struct {
#ifdef CONFIG_RV
/*
- * Per-task RV monitor. Nowadays fixed in RV_PER_TASK_MONITORS.
- * If we find justification for more monitors, we can think
- * about adding more or developing a dynamic method. So far,
- * none of these are justified.
+ * Per-task RV monitor, fixed in CONFIG_RV_PER_TASK_MONITORS.
+ * If memory becomes a concern, we can think about a dynamic method.
*/
- union rv_task_monitor rv[RV_PER_TASK_MONITORS];
+ union rv_task_monitor rv[CONFIG_RV_PER_TASK_MONITORS];
#endif
#ifdef CONFIG_USER_EVENTS
@@ -2030,6 +2030,9 @@ static inline int test_tsk_thread_flag(struct task_struct *tsk, int flag)
static inline void set_tsk_need_resched(struct task_struct *tsk)
{
+ if (tracepoint_enabled(sched_set_need_resched_tp) &&
+ !test_tsk_thread_flag(tsk, TIF_NEED_RESCHED))
+ __trace_set_need_resched(tsk, TIF_NEED_RESCHED);
set_tsk_thread_flag(tsk,TIF_NEED_RESCHED);
}
diff --git a/include/linux/sched/ext.h b/include/linux/sched/ext.h
index f7545430a548..7047101dbf58 100644
--- a/include/linux/sched/ext.h
+++ b/include/linux/sched/ext.h
@@ -164,7 +164,7 @@ struct sched_ext_entity {
/*
* Runtime budget in nsecs. This is usually set through
- * scx_bpf_dispatch() but can also be modified directly by the BPF
+ * scx_bpf_dsq_insert() but can also be modified directly by the BPF
* scheduler. Automatically decreased by SCX as the task executes. On
* depletion, a scheduling event is triggered.
*
@@ -176,10 +176,10 @@ struct sched_ext_entity {
/*
* Used to order tasks when dispatching to the vtime-ordered priority
- * queue of a dsq. This is usually set through scx_bpf_dispatch_vtime()
- * but can also be modified directly by the BPF scheduler. Modifying it
- * while a task is queued on a dsq may mangle the ordering and is not
- * recommended.
+ * queue of a dsq. This is usually set through
+ * scx_bpf_dsq_insert_vtime() but can also be modified directly by the
+ * BPF scheduler. Modifying it while a task is queued on a dsq may
+ * mangle the ordering and is not recommended.
*/
u64 dsq_vtime;
@@ -206,12 +206,25 @@ struct sched_ext_entity {
void sched_ext_free(struct task_struct *p);
void print_scx_info(const char *log_lvl, struct task_struct *p);
void scx_softlockup(u32 dur_s);
+bool scx_rcu_cpu_stall(void);
#else /* !CONFIG_SCHED_CLASS_EXT */
static inline void sched_ext_free(struct task_struct *p) {}
static inline void print_scx_info(const char *log_lvl, struct task_struct *p) {}
static inline void scx_softlockup(u32 dur_s) {}
+static inline bool scx_rcu_cpu_stall(void) { return false; }
#endif /* CONFIG_SCHED_CLASS_EXT */
+
+struct scx_task_group {
+#ifdef CONFIG_EXT_GROUP_SCHED
+ u32 flags; /* SCX_TG_* */
+ u32 weight;
+ u64 bw_period_us;
+ u64 bw_quota_us;
+ u64 bw_burst_us;
+#endif
+};
+
#endif /* _LINUX_SCHED_EXT_H */
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 5f03a39a26f7..6d0f9c599ff7 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -11,6 +11,8 @@
#include <linux/fs_parser.h>
#include <linux/userfaultfd_k.h>
+struct swap_iocb;
+
/* inode in-kernel data */
#ifdef CONFIG_TMPFS_QUOTA
@@ -107,7 +109,8 @@ static inline bool shmem_mapping(struct address_space *mapping)
void shmem_unlock_mapping(struct address_space *mapping);
struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
pgoff_t index, gfp_t gfp_mask);
-int shmem_writeout(struct folio *folio, struct writeback_control *wbc);
+int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
+ struct list_head *folio_list);
void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
int shmem_unuse(unsigned int type);
diff --git a/include/linux/soc/qcom/ubwc.h b/include/linux/soc/qcom/ubwc.h
new file mode 100644
index 000000000000..1ed8b1b16bc9
--- /dev/null
+++ b/include/linux/soc/qcom/ubwc.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2018, The Linux Foundation
+ * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
+ */
+
+#ifndef __QCOM_UBWC_H__
+#define __QCOM_UBWC_H__
+
+#include <linux/bits.h>
+#include <linux/types.h>
+
+struct qcom_ubwc_cfg_data {
+ u32 ubwc_enc_version;
+ /* Can be read from MDSS_BASE + 0x58 */
+ u32 ubwc_dec_version;
+
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ */
+ u32 ubwc_swizzle;
+#define UBWC_SWIZZLE_ENABLE_LVL1 BIT(0)
+#define UBWC_SWIZZLE_ENABLE_LVL2 BIT(1)
+#define UBWC_SWIZZLE_ENABLE_LVL3 BIT(2)
+
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
+ int highest_bank_bit;
+ bool ubwc_bank_spread;
+
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ bool macrotile_mode;
+};
+
+#define UBWC_1_0 0x10000000
+#define UBWC_2_0 0x20000000
+#define UBWC_3_0 0x30000000
+#define UBWC_4_0 0x40000000
+#define UBWC_4_3 0x40030000
+#define UBWC_5_0 0x50000000
+
+#if IS_ENABLED(CONFIG_QCOM_UBWC_CONFIG)
+const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void);
+#else
+static inline const struct qcom_ubwc_cfg_data *qcom_ubwc_config_get_data(void)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+static inline bool qcom_ubwc_get_ubwc_mode(const struct qcom_ubwc_cfg_data *cfg)
+{
+ bool ret = cfg->ubwc_enc_version == UBWC_1_0;
+
+ if (ret && !(cfg->ubwc_swizzle & UBWC_SWIZZLE_ENABLE_LVL1))
+ pr_err("UBWC config discrepancy - level 1 swizzling disabled on UBWC 1.0\n");
+
+ return ret;
+}
+
+#endif /* __QCOM_UBWC_H__ */
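
An illustrative consumer of the new header, handling the ERR_PTR
fallback when CONFIG_QCOM_UBWC_CONFIG is disabled:

static int example_probe_ubwc(void)
{
	const struct qcom_ubwc_cfg_data *cfg = qcom_ubwc_config_get_data();

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* e.g. pick the legacy mode on UBWC 1.0 parts */
	return qcom_ubwc_get_ubwc_mode(cfg) ? 1 : 0;
}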
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index c4830dfaff3d..82390712794c 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -424,7 +424,7 @@ bool spi_mem_default_supports_op(struct spi_mem *mem,
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);
void spi_mem_adjust_op_freq(struct spi_mem *mem, struct spi_mem_op *op);
-u64 spi_mem_calc_op_duration(struct spi_mem_op *op);
+u64 spi_mem_calc_op_duration(struct spi_mem *mem, struct spi_mem_op *op);
bool spi_mem_supports_op(struct spi_mem *mem,
const struct spi_mem_op *op);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6a3f92098872..317ae31e89b3 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -478,6 +478,7 @@ extern unsigned int lock_system_sleep(void);
extern void unlock_system_sleep(unsigned int);
extern bool pm_sleep_transition_in_progress(void);
+bool pm_hibernate_is_recovering(void);
#else /* !CONFIG_PM_SLEEP */
@@ -511,6 +512,7 @@ static inline unsigned int lock_system_sleep(void) { return 0; }
static inline void unlock_system_sleep(unsigned int flags) {}
static inline bool pm_sleep_transition_in_progress(void) { return false; }
+static inline bool pm_hibernate_is_recovering(void) { return false; }
#endif /* !CONFIG_PM_SLEEP */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index bc0e1c275fc0..2fe6ed2cc3fd 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -376,8 +376,9 @@ extern unsigned long totalreserve_pages;
/* linux/mm/swap.c */
-void lru_note_cost(struct lruvec *lruvec, bool file,
- unsigned int nr_io, unsigned int nr_rotated);
+void lru_note_cost_unlock_irq(struct lruvec *lruvec, bool file,
+ unsigned int nr_io, unsigned int nr_rotated)
+ __releases(lruvec->lru_lock);
void lru_note_cost_refault(struct folio *);
void folio_add_lru(struct folio *);
void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
@@ -415,7 +416,7 @@ extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
#define MIN_SWAPPINESS 0
#define MAX_SWAPPINESS 200
-/* Just recliam from anon folios in proactive memory reclaim */
+/* Just reclaim from anon folios in proactive memory reclaim */
#define SWAPPINESS_ANON_ONLY (MAX_SWAPPINESS + 1)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
@@ -431,6 +432,22 @@ extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
long remove_mapping(struct address_space *mapping, struct folio *folio);
+#if defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+extern int reclaim_register_node(struct node *node);
+extern void reclaim_unregister_node(struct node *node);
+
+#else
+
+static inline int reclaim_register_node(struct node *node)
+{
+ return 0;
+}
+
+static inline void reclaim_unregister_node(struct node *node)
+{
+}
+#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+
#ifdef CONFIG_NUMA
extern int sysctl_min_unmapped_ratio;
extern int sysctl_min_slab_ratio;
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index 07cbab516942..b449665c686a 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -7,9 +7,13 @@
* Copyright (c) 2012-2013 David Herrmann <dh.herrmann@gmail.com>
*/
-#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/types.h>
+
#include <linux/platform_data/simplefb.h>
+struct device;
+struct platform_device;
struct screen_info;
enum {
diff --git a/include/linux/usb/uvc.h b/include/linux/usb/uvc.h
index bce95153e5a6..ee19e9f915b8 100644
--- a/include/linux/usb/uvc.h
+++ b/include/linux/usb/uvc.h
@@ -29,6 +29,9 @@
#define UVC_GUID_EXT_GPIO_CONTROLLER \
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, \
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03}
+#define UVC_GUID_MSXU_1_5 \
+ {0xdc, 0x95, 0x3f, 0x0f, 0x32, 0x26, 0x4e, 0x4c, \
+ 0x92, 0xc9, 0xa0, 0x47, 0x82, 0xf4, 0x3b, 0xc8}
#define UVC_GUID_FORMAT_MJPEG \
{ 'M', 'J', 'P', 'G', 0x00, 0x00, 0x10, 0x00, \
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index 75342022d144..c0e716aec26a 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -30,11 +30,7 @@
* from userfaultfd, in order to leave a free define-space for
* shared O_* flags.
*/
-#define UFFD_CLOEXEC O_CLOEXEC
-#define UFFD_NONBLOCK O_NONBLOCK
-
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
-#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
/*
* Start with fault_pending_wqh and fault_wqh so they're more likely
@@ -213,12 +209,12 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
}
static inline bool vma_can_userfault(struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
bool wp_async)
{
vm_flags &= __VM_UFFD_FLAGS;
- if (vm_flags & VM_DROPPABLE)
+ if (vma->vm_flags & VM_DROPPABLE)
return false;
if ((vm_flags & VM_UFFD_MINOR) &&
@@ -263,6 +259,7 @@ extern void mremap_userfaultfd_prep(struct vm_area_struct *,
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
unsigned long from, unsigned long to,
unsigned long len);
+void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *);
extern bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
@@ -285,7 +282,7 @@ struct vm_area_struct *userfaultfd_clear_vma(struct vma_iterator *vmi,
int userfaultfd_register_range(struct userfaultfd_ctx *ctx,
struct vm_area_struct *vma,
- unsigned long vm_flags,
+ vm_flags_t vm_flags,
unsigned long start, unsigned long end,
bool wp_async);
@@ -375,6 +372,10 @@ static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
{
}
+static inline void mremap_userfaultfd_fail(struct vm_userfaultfd_ctx *ctx)
+{
+}
+
static inline bool userfaultfd_remove(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index b2ccb6845595..c287998908bf 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -507,7 +507,7 @@ static inline const char *lru_list_name(enum lru_list lru)
return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
}
-#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
+#if defined(CONFIG_VM_EVENT_COUNTERS)
static inline const char *vm_event_name(enum vm_event_item item)
{
return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
@@ -516,7 +516,7 @@ static inline const char *vm_event_name(enum vm_event_item item)
NR_VM_STAT_ITEMS +
item];
}
-#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+#endif /* CONFIG_VM_EVENT_COUNTERS */
#ifdef CONFIG_MEMCG
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 965a19809c7e..09855d819418 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -164,6 +164,8 @@ static inline bool wq_has_sleeper(struct wait_queue_head *wq_head)
extern void add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_exclusive(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
extern void add_wait_queue_priority(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
+extern int add_wait_queue_priority_exclusive(struct wait_queue_head *wq_head,
+ struct wait_queue_entry *wq_entry);
extern void remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry);
static inline void __add_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq_entry)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 6e30f275da77..45d5dd470ff6 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_WORKQUEUE_H
#define _LINUX_WORKQUEUE_H
+#include <linux/alloc_tag.h>
#include <linux/timer.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
@@ -401,6 +402,7 @@ enum wq_flags {
* http://thread.gmane.org/gmane.linux.kernel/1480396
*/
WQ_POWER_EFFICIENT = 1 << 7,
+ WQ_PERCPU = 1 << 8, /* bound to a specific cpu */
__WQ_DESTROYING = 1 << 15, /* internal: workqueue is destroying */
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
@@ -427,7 +429,7 @@ enum wq_consts {
/*
* System-wide workqueues which are always present.
*
- * system_wq is the one used by schedule[_delayed]_work[_on]().
+ * system_percpu_wq is the one used by schedule[_delayed]_work[_on]().
* Multi-CPU multi-threaded. There are users which expect relatively
* short queue flush time. Don't queue works which can run for too
* long.
@@ -438,7 +440,7 @@ enum wq_consts {
* system_long_wq is similar to system_wq but may host long running
* works. Queue flushing might take relatively long.
*
- * system_unbound_wq is unbound workqueue. Workers are not bound to
+ * system_dfl_wq is an unbound workqueue. Workers are not bound to
* any specific CPU, not concurrency managed, and all queued works are
* executed immediately as long as max_active limit is not reached and
* resources are available.
@@ -455,10 +457,12 @@ enum wq_consts {
* system_bh[_highpri]_wq are convenience interface to softirq. BH work items
* are executed in the queueing CPU's BH context in the queueing order.
*/
-extern struct workqueue_struct *system_wq;
+extern struct workqueue_struct *system_wq; /* use system_percpu_wq; this will be removed */
+extern struct workqueue_struct *system_percpu_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
+extern struct workqueue_struct *system_dfl_wq;
extern struct workqueue_struct *system_freezable_wq;
extern struct workqueue_struct *system_power_efficient_wq;
extern struct workqueue_struct *system_freezable_power_efficient_wq;
@@ -505,7 +509,8 @@ void workqueue_softirq_dead(unsigned int cpu);
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
__printf(1, 4) struct workqueue_struct *
-alloc_workqueue(const char *fmt, unsigned int flags, int max_active, ...);
+alloc_workqueue_noprof(const char *fmt, unsigned int flags, int max_active, ...);
+#define alloc_workqueue(...) alloc_hooks(alloc_workqueue_noprof(__VA_ARGS__))
#ifdef CONFIG_LOCKDEP
/**
@@ -544,8 +549,8 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
* Pointer to the allocated workqueue on success, %NULL on failure.
*/
#define alloc_ordered_workqueue_lockdep_map(fmt, flags, lockdep_map, args...) \
- alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), \
- 1, lockdep_map, ##args)
+ alloc_hooks(alloc_workqueue_lockdep_map(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags),\
+ 1, lockdep_map, ##args))
#endif
/**
@@ -577,7 +582,9 @@ alloc_workqueue_lockdep_map(const char *fmt, unsigned int flags, int max_active,
extern void destroy_workqueue(struct workqueue_struct *wq);
-struct workqueue_attrs *alloc_workqueue_attrs(void);
+struct workqueue_attrs *alloc_workqueue_attrs_noprof(void);
+#define alloc_workqueue_attrs(...) alloc_hooks(alloc_workqueue_attrs_noprof(__VA_ARGS__))
+
void free_workqueue_attrs(struct workqueue_attrs *attrs);
int apply_workqueue_attrs(struct workqueue_struct *wq,
const struct workqueue_attrs *attrs);
@@ -840,19 +847,6 @@ long work_on_cpu_key(int cpu, long (*fn)(void *),
work_on_cpu_key(_cpu, _fn, _arg, &__key); \
})
-long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
- void *arg, struct lock_class_key *key);
-
-/*
- * A new key is defined for each caller to make sure the work
- * associated with the function doesn't share its locking class.
- */
-#define work_on_cpu_safe(_cpu, _fn, _arg) \
-({ \
- static struct lock_class_key __key; \
- \
- work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
-})
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
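
A sketch of an allocation under the new flags and wrappers: call sites
keep the alloc_workqueue() spelling (now an alloc_hooks() macro for
allocation profiling), and WQ_PERCPU makes the per-CPU choice explicit:

static struct workqueue_struct *example_wq;

static int __init example_init(void)
{
	example_wq = alloc_workqueue("example_wq", WQ_PERCPU, 0);
	return example_wq ? 0 : -ENOMEM;
}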
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index eda4b62511f7..a2848d731a46 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -59,7 +59,6 @@ struct writeback_control {
unsigned for_kupdate:1; /* A kupdate writeback */
unsigned for_background:1; /* A background writeback */
unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
- unsigned for_reclaim:1; /* Invoked from the page allocator */
unsigned range_cyclic:1; /* range_start is cyclic */
unsigned for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
unsigned unpinned_netfs_wb:1; /* Cleared I_PINNING_NETFS_WB */
@@ -72,16 +71,6 @@ struct writeback_control {
*/
unsigned no_cgroup_owner:1;
- /* To enable batching of swap writes to non-block-device backends,
- * "plug" can be set point to a 'struct swap_iocb *'. When all swap
- * writes have been submitted, if with swap_iocb is not NULL,
- * swap_write_unplug() should be called.
- */
- struct swap_iocb **swap_plug;
-
- /* Target list for splitting a large folio */
- struct list_head *list;
-
/* internal fields used by the ->writepages implementation: */
struct folio_batch fbatch;
pgoff_t index;
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 13e9cc5490f7..f3ccff2d966c 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -46,4 +46,6 @@ void zs_obj_read_end(struct zs_pool *pool, unsigned long handle,
void zs_obj_write(struct zs_pool *pool, unsigned long handle,
void *handle_mem, size_t mem_len);
+extern const struct movable_operations zsmalloc_mops;
+
#endif
diff --git a/include/media/rcar-fcp.h b/include/media/rcar-fcp.h
index 179240fb163b..6ac9be9f675e 100644
--- a/include/media/rcar-fcp.h
+++ b/include/media/rcar-fcp.h
@@ -18,6 +18,7 @@ void rcar_fcp_put(struct rcar_fcp_device *fcp);
struct device *rcar_fcp_get_device(struct rcar_fcp_device *fcp);
int rcar_fcp_enable(struct rcar_fcp_device *fcp);
void rcar_fcp_disable(struct rcar_fcp_device *fcp);
+int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp);
#else
static inline struct rcar_fcp_device *rcar_fcp_get(const struct device_node *np)
{
@@ -33,6 +34,10 @@ static inline int rcar_fcp_enable(struct rcar_fcp_device *fcp)
return 0;
}
static inline void rcar_fcp_disable(struct rcar_fcp_device *fcp) { }
+static inline int rcar_fcp_soft_reset(struct rcar_fcp_device *fcp)
+{
+ return 0;
+}
#endif
#endif /* __MEDIA_RCAR_FCP_H__ */
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 3a87096e064f..c32c46286441 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -579,8 +579,10 @@ int v4l2_ctrl_handler_init_class(struct v4l2_ctrl_handler *hdl,
* @hdl: The control handler.
*
* Does nothing if @hdl == NULL.
+ *
+ * Return: @hdl's error field or 0 if @hdl is NULL.
*/
-void v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
+int v4l2_ctrl_handler_free(struct v4l2_ctrl_handler *hdl);
/**
* v4l2_ctrl_lock() - Helper function to lock the handler
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 1b6222fab24e..a69801274800 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -313,10 +313,16 @@ struct video_device {
* media_entity_to_video_device - Returns a &struct video_device from
* the &struct media_entity embedded on it.
*
- * @__entity: pointer to &struct media_entity
- */
-#define media_entity_to_video_device(__entity) \
- container_of(__entity, struct video_device, entity)
+ * @__entity: pointer to &struct media_entity, may be NULL
+ */
+#define media_entity_to_video_device(__entity) \
+({ \
+ typeof(__entity) __me_vdev_ent = __entity; \
+ \
+ __me_vdev_ent ? \
+ container_of(__me_vdev_ent, struct video_device, entity) : \
+ NULL; \
+})
/**
* to_video_device - Returns a &struct video_device from the
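Because the reworked macro evaluates its argument once and tolerates NULL, link-walking call sites can drop their explicit guard. A hedged sketch (find_remote_vdev() is hypothetical):

static struct video_device *find_remote_vdev(struct media_pad *pad)
{
	struct media_pad *remote = media_pad_remote_pad_first(pad);

	/* A NULL remote yields NULL here instead of a bogus container_of(). */
	return media_entity_to_video_device(remote ? remote->entity : NULL);
}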
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index c6ec87e88dfe..82695c3a300a 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -679,6 +679,7 @@ long int v4l2_compat_ioctl32(struct file *file, unsigned int cmd,
#endif
unsigned int v4l2_compat_translate_cmd(unsigned int cmd);
+unsigned int v4l2_translate_cmd(unsigned int cmd);
int v4l2_compat_get_user(void __user *arg, void *parg, unsigned int cmd);
int v4l2_compat_put_user(void __user *arg, void *parg, unsigned int cmd);
int v4l2_compat_get_array_args(struct file *file, void *mbuf,
diff --git a/include/media/v4l2-jpeg.h b/include/media/v4l2-jpeg.h
index b65658a02e3c..62dda1560275 100644
--- a/include/media/v4l2-jpeg.h
+++ b/include/media/v4l2-jpeg.h
@@ -169,15 +169,6 @@ struct v4l2_jpeg_header {
int v4l2_jpeg_parse_header(void *buf, size_t len, struct v4l2_jpeg_header *out);
-int v4l2_jpeg_parse_frame_header(void *buf, size_t len,
- struct v4l2_jpeg_frame_header *frame_header);
-int v4l2_jpeg_parse_scan_header(void *buf, size_t len,
- struct v4l2_jpeg_scan_header *scan_header);
-int v4l2_jpeg_parse_quantization_tables(void *buf, size_t len, u8 precision,
- struct v4l2_jpeg_reference *q_tables);
-int v4l2_jpeg_parse_huffman_tables(void *buf, size_t len,
- struct v4l2_jpeg_reference *huffman_tables);
-
extern const u8 v4l2_jpeg_zigzag_scan_index[V4L2_JPEG_PIXELS_IN_BLOCK];
extern const u8 v4l2_jpeg_ref_table_luma_qt[V4L2_JPEG_PIXELS_IN_BLOCK];
extern const u8 v4l2_jpeg_ref_table_chroma_qt[V4L2_JPEG_PIXELS_IN_BLOCK];
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 57f2bcb4eb16..5dcf4065708f 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -460,8 +460,6 @@ enum v4l2_subdev_pre_streamon_flags {
* but use the v4l2_subdev_enable_streams() and
* v4l2_subdev_disable_streams() helpers.
*
- * @g_pixelaspect: callback to return the pixelaspect ratio.
- *
* @s_rx_buffer: set a host allocated memory buffer for the subdev. The subdev
* can adjust @size to a lower value and must not write more data to the
* buffer starting at @data than the original value of @size.
@@ -491,7 +489,6 @@ struct v4l2_subdev_video_ops {
int (*g_tvnorms_output)(struct v4l2_subdev *sd, v4l2_std_id *std);
int (*g_input_status)(struct v4l2_subdev *sd, u32 *status);
int (*s_stream)(struct v4l2_subdev *sd, int enable);
- int (*g_pixelaspect)(struct v4l2_subdev *sd, struct v4l2_fract *aspect);
int (*s_rx_buffer)(struct v4l2_subdev *sd, void *buf,
unsigned int *size);
int (*pre_streamon)(struct v4l2_subdev *sd, u32 flags);
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 4ea6352fd63f..d9b91ff02761 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -14,6 +14,11 @@
#include <linux/videodev2.h>
struct device;
+struct vsp1_dl_list;
+
+/* -----------------------------------------------------------------------------
+ * VSP1 DU interface
+ */
int vsp1_du_init(struct device *dev);
@@ -121,4 +126,88 @@ void vsp1_du_atomic_flush(struct device *dev, unsigned int pipe_index,
int vsp1_du_map_sg(struct device *dev, struct sg_table *sgt);
void vsp1_du_unmap_sg(struct device *dev, struct sg_table *sgt);
+/* -----------------------------------------------------------------------------
+ * VSP1 ISP interface
+ */
+
+/**
+ * struct vsp1_isp_buffer_desc - Describe a buffer allocated by VSPX
+ * @size: Byte size of the buffer allocated by VSPX
+ * @cpu_addr: CPU-mapped address of a buffer allocated by VSPX
+ * @dma_addr: bus address of a buffer allocated by VSPX
+ */
+struct vsp1_isp_buffer_desc {
+ size_t size;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+/**
+ * struct vsp1_isp_job_desc - Describe a VSPX buffer transfer request
+ * @config: ConfigDMA buffer descriptor
+ * @config.pairs: number of reg-value pairs in the ConfigDMA buffer
+ * @config.mem: bus address of the ConfigDMA buffer
+ * @img: RAW image buffer descriptor
+ * @img.fmt: RAW image format
+ * @img.mem: bus address of the RAW image buffer
+ * @dl: pointer to the display list populated by the VSPX driver in the
+ * vsp1_isp_job_prepare() function
+ *
+ * Describe a transfer request for the VSPX to perform on behalf of the ISP.
+ * The job descriptor contains an optional ConfigDMA buffer and one RAW image
+ * buffer. Set config.pairs to 0 if no ConfigDMA buffer should be transferred.
+ * The minimum number of config.pairs that can be written using ConfigDMA is 17.
+ * A number of pairs < 16 corrupts the output image. A number of pairs == 16
+ * freezes the VSPX operation. If the ISP driver has to write fewer than 17
+ * pairs, it shall pad the buffer with writes directed to registers that have
+ * no effect, or avoid using ConfigDMA at all for such small write sequences.
+ *
+ * The ISP driver shall pass an instance of this type to the vsp1_isp_job_prepare()
+ * function that will populate the display list pointer @dl using the @config
+ * and @img descriptors. When the job has to be run on the VSPX, the descriptor
+ * shall be passed to vsp1_isp_job_run() which consumes the display list.
+ *
+ * Job descriptors not yet run shall be released with a call to
+ * vsp1_isp_job_release() when stopping the streaming in order to properly
+ * release the resources acquired by vsp1_isp_job_prepare().
+ */
+struct vsp1_isp_job_desc {
+ struct {
+ unsigned int pairs;
+ dma_addr_t mem;
+ } config;
+ struct {
+ struct v4l2_pix_format_mplane fmt;
+ dma_addr_t mem;
+ } img;
+ struct vsp1_dl_list *dl;
+};
+
+/**
+ * struct vsp1_vspx_frame_end - VSPX frame end callback data
+ * @vspx_frame_end: Frame end callback. Called after a transfer job has been
+ * completed. If the job includes both a ConfigDMA and a
+ * RAW image, the callback is called after both have been
+ * transferred
+ * @frame_end_data: Frame end callback data, passed to vspx_frame_end
+ */
+struct vsp1_vspx_frame_end {
+ void (*vspx_frame_end)(void *data);
+ void *frame_end_data;
+};
+
+int vsp1_isp_init(struct device *dev);
+struct device *vsp1_isp_get_bus_master(struct device *dev);
+int vsp1_isp_alloc_buffer(struct device *dev, size_t size,
+ struct vsp1_isp_buffer_desc *buffer_desc);
+void vsp1_isp_free_buffer(struct device *dev,
+ struct vsp1_isp_buffer_desc *buffer_desc);
+int vsp1_isp_start_streaming(struct device *dev,
+ struct vsp1_vspx_frame_end *frame_end);
+void vsp1_isp_stop_streaming(struct device *dev);
+int vsp1_isp_job_prepare(struct device *dev,
+ struct vsp1_isp_job_desc *job);
+int vsp1_isp_job_run(struct device *dev, struct vsp1_isp_job_desc *job);
+void vsp1_isp_job_release(struct device *dev, struct vsp1_isp_job_desc *job);
+
#endif /* __MEDIA_VSP1_H__ */
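Tying the kerneldoc above together, the intended VSPX job lifecycle is prepare, then run, with release reserved for jobs that never ran. A hedged sketch of the queueing path in a hypothetical ISP driver (buffer setup and locking elided):

static int isp_queue_vspx_job(struct device *vspx, struct vsp1_isp_job_desc *job,
			      dma_addr_t cfg_mem, unsigned int pairs,
			      dma_addr_t raw_mem,
			      const struct v4l2_pix_format_mplane *fmt)
{
	int ret;

	job->config.pairs = pairs;	/* 0 to skip ConfigDMA, else >= 17 */
	job->config.mem = cfg_mem;
	job->img.fmt = *fmt;
	job->img.mem = raw_mem;

	ret = vsp1_isp_job_prepare(vspx, job);	/* populates job->dl */
	if (ret)
		return ret;

	ret = vsp1_isp_job_run(vspx, job);	/* consumes job->dl */
	if (ret)
		vsp1_isp_job_release(vspx, job);	/* never ran: drop the dl */
	return ret;
}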
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 14c9f943d53f..c8cd0f00c845 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -252,6 +252,7 @@ TRACE_EVENT(non_standard_event,
__print_hex(__get_dynamic_array(buf), __entry->len))
);
+#ifdef CONFIG_PCIEAER
/*
* PCIe AER Trace event
*
@@ -337,6 +338,7 @@ TRACE_EVENT(aer_event,
__print_array(__entry->tlp_header, PCIE_STD_MAX_TLP_HEADERLOG, 4) :
"Not available")
);
+#endif /* CONFIG_PCIEAER */
/*
* memory-failure recovery action result event
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 7dc7b1cc71b5..0a8e092c0ea8 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -52,11 +52,15 @@ static inline int ib_umem_offset(struct ib_umem *umem)
return umem->address & ~PAGE_MASK;
}
+static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem)
+{
+ return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem);
+}
+
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
unsigned long pgsz)
{
- return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
- (pgsz - 1);
+ return ib_umem_start_dma_addr(umem) & (pgsz - 1);
}
static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
@@ -135,14 +139,27 @@ static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
unsigned long pgsz_bitmap,
u64 pgoff_bitmask)
{
- struct scatterlist *sg = umem->sgt_append.sgt.sgl;
dma_addr_t dma_addr;
- dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
+ dma_addr = ib_umem_start_dma_addr(umem);
return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
dma_addr & pgoff_bitmask);
}
+static inline bool ib_umem_is_contiguous(struct ib_umem *umem)
+{
+ dma_addr_t dma_addr;
+ unsigned long pgsz;
+
+ /*
+ * Select the smallest aligned page that can contain the whole umem if
+ * it was contiguous.
+ */
+ dma_addr = ib_umem_start_dma_addr(umem);
+ pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
+ return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
+}
+
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
unsigned long offset, size_t size,
int fd, int access,
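The pgsz computation in ib_umem_is_contiguous() is worth unpacking: XOR-ing the first and last DMA addresses leaves bits set exactly where the two addresses differ, so rounding that (plus one) up to a power of two gives the smallest aligned block size that could cover the whole range; a contiguous umem must then satisfy ib_umem_find_best_pgoff() for that size. A standalone arithmetic sketch, not kernel code:

#include <stdint.h>

/*
 * Example: dma_addr = 0x201000, length = 0x3000, last byte at 0x203fff.
 * 0x201000 ^ 0x203fff = 0x002fff; +1 rounded up gives pgsz = 0x4000, and
 * the 16 KiB block starting at 0x200000 covers the whole range.
 */
static uint64_t covering_pgsz(uint64_t dma_addr, uint64_t length)
{
	uint64_t span = dma_addr ^ (dma_addr + length - 1);
	uint64_t pgsz = 1;

	while (pgsz < span + 1)		/* roundup_pow_of_two(span + 1) */
		pgsz <<= 1;
	return pgsz;
}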
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index af43a8d2a74a..6139223e92e4 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -42,6 +42,7 @@
#include <rdma/signature.h>
#include <uapi/rdma/rdma_user_ioctl.h>
#include <uapi/rdma/ib_user_ioctl_verbs.h>
+#include <linux/pci-tph.h>
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
@@ -1846,6 +1847,27 @@ struct ib_dm {
atomic_t usecnt;
};
+/* bit values to mark existence of ib_dmah fields */
+enum {
+ IB_DMAH_CPU_ID_EXISTS,
+ IB_DMAH_MEM_TYPE_EXISTS,
+ IB_DMAH_PH_EXISTS,
+};
+
+struct ib_dmah {
+ struct ib_device *device;
+ struct ib_uobject *uobject;
+ /*
+ * Implementation details of the RDMA core, don't use in drivers:
+ */
+ struct rdma_restrack_entry res;
+ u32 cpu_id;
+ enum tph_mem_type mem_type;
+ atomic_t usecnt;
+ u8 ph;
+ u8 valid_fields; /* use IB_DMAH_XXX_EXISTS */
+};
+
struct ib_mr {
struct ib_device *device;
struct ib_pd *pd;
@@ -1863,6 +1885,7 @@ struct ib_mr {
struct ib_dm *dm;
struct ib_sig_attrs *sig_attrs; /* only for IB_MR_TYPE_INTEGRITY MRs */
+ struct ib_dmah *dmah;
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -2486,16 +2509,31 @@ struct ib_device_ops {
int (*destroy_qp)(struct ib_qp *qp, struct ib_udata *udata);
int (*create_cq)(struct ib_cq *cq, const struct ib_cq_init_attr *attr,
struct uverbs_attr_bundle *attrs);
+ int (*create_cq_umem)(struct ib_cq *cq,
+ const struct ib_cq_init_attr *attr,
+ struct ib_umem *umem,
+ struct uverbs_attr_bundle *attrs);
int (*modify_cq)(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int (*destroy_cq)(struct ib_cq *cq, struct ib_udata *udata);
int (*resize_cq)(struct ib_cq *cq, int cqe, struct ib_udata *udata);
+ /**
+ * pre_destroy_cq - Prevent a cq from generating any new work
+ * completions, but do not free any kernel resources
+ */
+ int (*pre_destroy_cq)(struct ib_cq *cq);
+ /**
+ * post_destroy_cq - Free all kernel resources
+ */
+ void (*post_destroy_cq)(struct ib_cq *cq);
struct ib_mr *(*get_dma_mr)(struct ib_pd *pd, int mr_access_flags);
struct ib_mr *(*reg_user_mr)(struct ib_pd *pd, u64 start, u64 length,
u64 virt_addr, int mr_access_flags,
+ struct ib_dmah *dmah,
struct ib_udata *udata);
struct ib_mr *(*reg_user_mr_dmabuf)(struct ib_pd *pd, u64 offset,
u64 length, u64 virt_addr, int fd,
int mr_access_flags,
+ struct ib_dmah *dmah,
struct uverbs_attr_bundle *attrs);
struct ib_mr *(*rereg_user_mr)(struct ib_mr *mr, int flags, u64 start,
u64 length, u64 virt_addr,
@@ -2560,6 +2598,9 @@ struct ib_device_ops {
struct ib_dm_alloc_attr *attr,
struct uverbs_attr_bundle *attrs);
int (*dealloc_dm)(struct ib_dm *dm, struct uverbs_attr_bundle *attrs);
+ int (*alloc_dmah)(struct ib_dmah *ibdmah,
+ struct uverbs_attr_bundle *attrs);
+ int (*dealloc_dmah)(struct ib_dmah *dmah, struct uverbs_attr_bundle *attrs);
struct ib_mr *(*reg_dm_mr)(struct ib_pd *pd, struct ib_dm *dm,
struct ib_dm_mr_attr *attr,
struct uverbs_attr_bundle *attrs);
@@ -2717,6 +2758,7 @@ struct ib_device_ops {
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_counters);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
+ DECLARE_RDMA_OBJ_SIZE(ib_dmah);
DECLARE_RDMA_OBJ_SIZE(ib_mw);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
DECLARE_RDMA_OBJ_SIZE(ib_qp);
@@ -2905,11 +2947,18 @@ struct ib_block_iter {
unsigned int __pg_bit; /* alignment of current block */
};
-struct ib_device *_ib_alloc_device(size_t size);
+struct ib_device *_ib_alloc_device(size_t size, struct net *net);
#define ib_alloc_device(drv_struct, member) \
container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
BUILD_BUG_ON_ZERO(offsetof( \
- struct drv_struct, member))), \
+ struct drv_struct, member)), \
+ &init_net), \
+ struct drv_struct, member)
+
+#define ib_alloc_device_with_net(drv_struct, member, net) \
+ container_of(_ib_alloc_device(sizeof(struct drv_struct) + \
+ BUILD_BUG_ON_ZERO(offsetof( \
+ struct drv_struct, member)), net), \
struct drv_struct, member)
void ib_dealloc_device(struct ib_device *device);
@@ -4794,11 +4843,17 @@ struct ib_ucontext *ib_uverbs_get_ucontext_file(struct ib_uverbs_file *ufile);
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs);
+bool rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs);
#else
static inline int uverbs_destroy_def_handler(struct uverbs_attr_bundle *attrs)
{
return 0;
}
+static inline bool
+rdma_uattrs_has_raw_cap(const struct uverbs_attr_bundle *attrs)
+{
+ return false;
+}
#endif
struct net_device *rdma_alloc_netdev(struct ib_device *device, u32 port_num,
@@ -4855,6 +4910,12 @@ static inline int ibdev_to_node(struct ib_device *ibdev)
bool rdma_dev_access_netns(const struct ib_device *device,
const struct net *net);
+bool rdma_dev_has_raw_cap(const struct ib_device *dev);
+static inline struct net *rdma_dev_net(struct ib_device *device)
+{
+ return read_pnet(&device->coredev.rdma_net);
+}
+
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MIN (0xC000)
#define IB_ROCE_UDP_ENCAP_VALID_PORT_MAX (0xFFFF)
#define IB_GRH_FLOWLABEL_MASK (0x000FFFFF)
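The new net-aware allocator lets a driver bind its ib_device to a namespace other than init_net at allocation time; ib_alloc_device() keeps the old behaviour by passing &init_net. A hedged sketch, with my_ib_dev as a hypothetical driver structure:

struct my_ib_dev {
	struct ib_device ibdev;		/* offset 0, enforced by BUILD_BUG_ON_ZERO */
	int private_state;
};

static struct my_ib_dev *my_alloc(struct net *net)
{
	/* Identical to ib_alloc_device() except for the owning namespace. */
	return ib_alloc_device_with_net(my_ib_dev, ibdev, net);
}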
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 0d69ded73bf2..8a9bcf77dace 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -57,6 +57,10 @@ enum rdma_restrack_type {
*/
RDMA_RESTRACK_SRQ,
/**
+ * @RDMA_RESTRACK_DMAH: DMA handle
+ */
+ RDMA_RESTRACK_DMAH,
+ /**
* @RDMA_RESTRACK_MAX: Last entry, used for array declarations
*/
RDMA_RESTRACK_MAX
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 510c88bfabd4..17fa4f6e5ea6 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -19,45 +19,22 @@
#ifdef CONFIG_RV_REACTORS
#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static char REACT_MSG_##name[1024]; \
- \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- snprintf(REACT_MSG_##name, 1024, \
- "rv: monitor %s does not allow event %s on state %s\n", \
- #name, \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(curr_state)); \
- return REACT_MSG_##name; \
-} \
- \
-static void cond_react_##name(char *msg) \
+static void cond_react_##name(type curr_state, type event) \
{ \
- if (rv_##name.react) \
- rv_##name.react(msg); \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return rv_reacting_on(); \
+ if (!rv_reacting_on() || !rv_##name.react) \
+ return; \
+ rv_##name.react("rv: monitor %s does not allow event %s on state %s\n", \
+ #name, \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(curr_state)); \
}
#else /* CONFIG_RV_REACTOR */
#define DECLARE_RV_REACTING_HELPERS(name, type) \
-static inline char *format_react_msg_##name(type curr_state, type event) \
-{ \
- return NULL; \
-} \
- \
-static void cond_react_##name(char *msg) \
+static void cond_react_##name(type curr_state, type event) \
{ \
return; \
-} \
- \
-static bool rv_reacting_on_##name(void) \
-{ \
- return 0; \
}
#endif
@@ -78,23 +55,6 @@ static inline void da_monitor_reset_##name(struct da_monitor *da_mon) \
} \
\
/* \
- * da_monitor_curr_state_##name - return the current state \
- */ \
-static inline type da_monitor_curr_state_##name(struct da_monitor *da_mon) \
-{ \
- return da_mon->curr_state; \
-} \
- \
-/* \
- * da_monitor_set_state_##name - set the new current state \
- */ \
-static inline void \
-da_monitor_set_state_##name(struct da_monitor *da_mon, enum states_##name state) \
-{ \
- da_mon->curr_state = state; \
-} \
- \
-/* \
* da_monitor_start_##name - start monitoring \
* \
* The monitor will ignore all events until monitoring is set to true. This \
@@ -150,65 +110,81 @@ static inline bool da_monitor_handling_event_##name(struct da_monitor *da_mon)
* Event handler for implicit monitors. An implicit monitor is one for which
* the handler does not need to specify which da_monitor to manipulate.
* Examples of implicit monitors are the per_cpu or the global ones.
+ *
+ * Retry in case there is a race between getting and setting the next state;
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle events arriving in any order.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_IMPLICIT(name, type) \
\
static inline bool \
da_event_##name(struct da_monitor *da_mon, enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ cond_react_##name(curr_state, event); \
+ trace_error_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+ " retries reached for event %s, resetting monitor %s\n", \
+ model_get_event_name_##name(event), #name); \
return false; \
} \
/*
* Event handler for per_task monitors.
+ *
+ * Retry in case there is a race between getting and setting the next state,
+ * warn and reset the monitor if it runs out of retries. The monitor should be
+ * able to handle various orders.
*/
#define DECLARE_DA_MON_MODEL_HANDLER_PER_TASK(name, type) \
\
static inline bool da_event_##name(struct da_monitor *da_mon, struct task_struct *tsk, \
enum events_##name event) \
{ \
- type curr_state = da_monitor_curr_state_##name(da_mon); \
- type next_state = model_get_next_state_##name(curr_state, event); \
- \
- if (next_state != INVALID_STATE) { \
- da_monitor_set_state_##name(da_mon, next_state); \
- \
- trace_event_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event), \
- model_get_state_name_##name(next_state), \
- model_is_final_state_##name(next_state)); \
- \
- return true; \
+ enum states_##name curr_state, next_state; \
+ \
+ curr_state = READ_ONCE(da_mon->curr_state); \
+ for (int i = 0; i < MAX_DA_RETRY_RACING_EVENTS; i++) { \
+ next_state = model_get_next_state_##name(curr_state, event); \
+ if (next_state == INVALID_STATE) { \
+ cond_react_##name(curr_state, event); \
+ trace_error_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event)); \
+ return false; \
+ } \
+ if (likely(try_cmpxchg(&da_mon->curr_state, &curr_state, next_state))) { \
+ trace_event_##name(tsk->pid, \
+ model_get_state_name_##name(curr_state), \
+ model_get_event_name_##name(event), \
+ model_get_state_name_##name(next_state), \
+ model_is_final_state_##name(next_state)); \
+ return true; \
+ } \
} \
\
- if (rv_reacting_on_##name()) \
- cond_react_##name(format_react_msg_##name(curr_state, event)); \
- \
- trace_error_##name(tsk->pid, \
- model_get_state_name_##name(curr_state), \
- model_get_event_name_##name(event)); \
- \
+ trace_rv_retries_error(#name, model_get_event_name_##name(event)); \
+ pr_warn("rv: " __stringify(MAX_DA_RETRY_RACING_EVENTS) \
+ " retries reached for event %s, resetting monitor %s\n", \
+ model_get_event_name_##name(event), #name); \
return false; \
}
@@ -512,6 +488,30 @@ da_handle_start_event_##name(struct task_struct *tsk, enum events_##name event)
__da_handle_event_##name(da_mon, tsk, event); \
\
return 1; \
+} \
+ \
+/* \
+ * da_handle_start_run_event_##name - start monitoring and handle event \
+ * \
+ * This function is used to notify the monitor that the system is in the \
+ * initial state, so the monitor can start monitoring and handling events. \
+ */ \
+static inline bool \
+da_handle_start_run_event_##name(struct task_struct *tsk, enum events_##name event) \
+{ \
+ struct da_monitor *da_mon; \
+ \
+ if (!da_monitor_enabled_##name()) \
+ return 0; \
+ \
+ da_mon = da_get_monitor_##name(tsk); \
+ \
+ if (unlikely(!da_monitoring_##name(da_mon))) \
+ da_monitor_start_##name(da_mon); \
+ \
+ __da_handle_event_##name(da_mon, tsk, event); \
+ \
+ return 1; \
}
/*
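The handlers above replace the old load/store state update with a lock-free try_cmpxchg() loop. Stripped of the macro plumbing, the pattern looks like this hedged sketch (advance_state() and the retry bound are illustrative):

static bool advance_state(int *state, int event, int (*next)(int, int))
{
	int curr = READ_ONCE(*state);

	for (int i = 0; i < 8; i++) {	/* stand-in for MAX_DA_RETRY_RACING_EVENTS */
		int nxt = next(curr, event);

		if (nxt < 0)		/* INVALID_STATE: event not allowed here */
			return false;
		/* On failure, try_cmpxchg() reloads curr with the racing value. */
		if (try_cmpxchg(state, &curr, nxt))
			return true;
	}
	return false;	/* too many racing events; the caller resets the monitor */
}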
diff --git a/include/rv/ltl_monitor.h b/include/rv/ltl_monitor.h
new file mode 100644
index 000000000000..67031a774e3d
--- /dev/null
+++ b/include/rv/ltl_monitor.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/**
+ * This file must be combined with the $(MODEL_NAME).h file generated by
+ * tools/verification/rvgen.
+ */
+
+#include <linux/args.h>
+#include <linux/rv.h>
+#include <linux/stringify.h>
+#include <linux/seq_buf.h>
+#include <rv/instrumentation.h>
+#include <trace/events/task.h>
+#include <trace/events/sched.h>
+
+#ifndef MONITOR_NAME
+#error "Please include $(MODEL_NAME).h generated by rvgen"
+#endif
+
+#ifdef CONFIG_RV_REACTORS
+#define RV_MONITOR_NAME CONCATENATE(rv_, MONITOR_NAME)
+static struct rv_monitor RV_MONITOR_NAME;
+
+static void rv_cond_react(struct task_struct *task)
+{
+ if (!rv_reacting_on() || !RV_MONITOR_NAME.react)
+ return;
+ RV_MONITOR_NAME.react("rv: "__stringify(MONITOR_NAME)": %s[%d]: violation detected\n",
+ task->comm, task->pid);
+}
+#else
+static void rv_cond_react(struct task_struct *task)
+{
+}
+#endif
+
+static int ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+
+static void ltl_atoms_fetch(struct task_struct *task, struct ltl_monitor *mon);
+static void ltl_atoms_init(struct task_struct *task, struct ltl_monitor *mon, bool task_creation);
+
+static struct ltl_monitor *ltl_get_monitor(struct task_struct *task)
+{
+ return &task->rv[ltl_monitor_slot].ltl_mon;
+}
+
+static void ltl_task_init(struct task_struct *task, bool task_creation)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ memset(&mon->states, 0, sizeof(mon->states));
+
+ for (int i = 0; i < LTL_NUM_ATOM; ++i)
+ __set_bit(i, mon->unknown_atoms);
+
+ ltl_atoms_init(task, mon, task_creation);
+ ltl_atoms_fetch(task, mon);
+}
+
+static void handle_task_newtask(void *data, struct task_struct *task, unsigned long flags)
+{
+ ltl_task_init(task, true);
+}
+
+static int ltl_monitor_init(void)
+{
+ struct task_struct *g, *p;
+ int ret, cpu;
+
+ ret = rv_get_task_monitor_slot();
+ if (ret < 0)
+ return ret;
+
+ ltl_monitor_slot = ret;
+
+ rv_attach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ read_lock(&tasklist_lock);
+
+ for_each_process_thread(g, p)
+ ltl_task_init(p, false);
+
+ for_each_present_cpu(cpu)
+ ltl_task_init(idle_task(cpu), false);
+
+ read_unlock(&tasklist_lock);
+
+ return 0;
+}
+
+static void ltl_monitor_destroy(void)
+{
+ rv_detach_trace_probe(name, task_newtask, handle_task_newtask);
+
+ rv_put_task_monitor_slot(ltl_monitor_slot);
+ ltl_monitor_slot = RV_PER_TASK_MONITOR_INIT;
+}
+
+static void ltl_illegal_state(struct task_struct *task, struct ltl_monitor *mon)
+{
+ CONCATENATE(trace_error_, MONITOR_NAME)(task);
+ rv_cond_react(task);
+}
+
+static void ltl_attempt_start(struct task_struct *task, struct ltl_monitor *mon)
+{
+ if (rv_ltl_all_atoms_known(mon))
+ ltl_start(task, mon);
+}
+
+static inline void ltl_atom_set(struct ltl_monitor *mon, enum ltl_atom atom, bool value)
+{
+ __clear_bit(atom, mon->unknown_atoms);
+ if (value)
+ __set_bit(atom, mon->atoms);
+ else
+ __clear_bit(atom, mon->atoms);
+}
+
+static void
+ltl_trace_event(struct task_struct *task, struct ltl_monitor *mon, unsigned long *next_state)
+{
+ const char *format_str = "%s";
+ DECLARE_SEQ_BUF(atoms, 64);
+ char states[32], next[32];
+ int i;
+
+ if (!CONCATENATE(CONCATENATE(trace_event_, MONITOR_NAME), _enabled)())
+ return;
+
+ snprintf(states, sizeof(states), "%*pbl", RV_MAX_BA_STATES, mon->states);
+ snprintf(next, sizeof(next), "%*pbl", RV_MAX_BA_STATES, next_state);
+
+ for (i = 0; i < LTL_NUM_ATOM; ++i) {
+ if (test_bit(i, mon->atoms)) {
+ seq_buf_printf(&atoms, format_str, ltl_atom_str(i));
+ format_str = ",%s";
+ }
+ }
+
+ CONCATENATE(trace_event_, MONITOR_NAME)(task, states, atoms.buffer, next);
+}
+
+static void ltl_validate(struct task_struct *task, struct ltl_monitor *mon)
+{
+ DECLARE_BITMAP(next_states, RV_MAX_BA_STATES) = {0};
+
+ if (!rv_ltl_valid_state(mon))
+ return;
+
+ for (unsigned int i = 0; i < RV_NUM_BA_STATES; ++i) {
+ if (test_bit(i, mon->states))
+ ltl_possible_next_states(mon, i, next_states);
+ }
+
+ ltl_trace_event(task, mon, next_states);
+
+ memcpy(mon->states, next_states, sizeof(next_states));
+
+ if (!rv_ltl_valid_state(mon))
+ ltl_illegal_state(task, mon);
+}
+
+static void ltl_atom_update(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_set(mon, atom, value);
+ ltl_atoms_fetch(task, mon);
+
+ if (!rv_ltl_valid_state(mon)) {
+ ltl_attempt_start(task, mon);
+ return;
+ }
+
+ ltl_validate(task, mon);
+}
+
+static void __maybe_unused ltl_atom_pulse(struct task_struct *task, enum ltl_atom atom, bool value)
+{
+ struct ltl_monitor *mon = ltl_get_monitor(task);
+
+ ltl_atom_update(task, atom, value);
+
+ ltl_atom_set(mon, atom, !value);
+ ltl_validate(task, mon);
+}
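For a sense of how this header is driven: an rvgen-generated monitor attaches tracepoint probes and feeds atom transitions through ltl_atom_update(), or ltl_atom_pulse() for atoms that are true only instantaneously. A hedged sketch of a probe, where LTL_ATOM_SLEEP and the monitor wiring are hypothetical (real monitors come from tools/verification/rvgen):

/* Attached via rv_attach_trace_probe(name, sched_switch, ...). */
static void handle_sched_switch(void *data, bool preempt,
				struct task_struct *prev,
				struct task_struct *next,
				unsigned int prev_state)
{
	/* prev stops running: its hypothetical "sleep" atom becomes true. */
	ltl_atom_update(prev, LTL_ATOM_SLEEP, true);
	/* next starts running: its "sleep" atom becomes false. */
	ltl_atom_update(next, LTL_ATOM_SLEEP, false);
}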
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 68dd49947d04..6d6500148c4b 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -184,6 +184,11 @@ struct scsi_device {
*/
unsigned force_runtime_start_on_system_start:1;
+ /*
+ * Set if the device is an ATA device.
+ */
+ unsigned is_ata:1;
+
unsigned removable:1;
unsigned changed:1; /* Data invalid due to media change */
unsigned busy:1; /* Used to prevent races */
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index d02b55261307..b908aacfef48 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -383,6 +383,8 @@ struct fc_rport { /* aka fc_starget_attrs */
struct work_struct stgt_delete_work;
struct work_struct rport_delete_work;
struct request_queue *rqst_q; /* bsg support */
+
+ struct workqueue_struct *devloss_work_q;
} __attribute__((aligned(sizeof(unsigned long))));
/* bit field values for struct fc_rport "flags" field: */
@@ -576,7 +578,6 @@ struct fc_host_attrs {
/* work queues for rport state manipulation */
struct workqueue_struct *work_q;
- struct workqueue_struct *devloss_work_q;
/* bsg support */
struct request_queue *rqst_q;
@@ -654,8 +655,6 @@ struct fc_host_attrs {
(((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
#define fc_host_work_q(x) \
(((struct fc_host_attrs *)(x)->shost_data)->work_q)
-#define fc_host_devloss_work_q(x) \
- (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q)
#define fc_host_dev_loss_tmo(x) \
(((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo)
#define fc_host_max_ct_payload(x) \
diff --git a/include/soc/spacemit/k1-syscon.h b/include/soc/spacemit/k1-syscon.h
new file mode 100644
index 000000000000..c59bd7a38e5b
--- /dev/null
+++ b/include/soc/spacemit/k1-syscon.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+/* SpacemiT clock and reset driver definitions for the K1 SoC */
+
+#ifndef __SOC_K1_SYSCON_H__
+#define __SOC_K1_SYSCON_H__
+
+/* Auxiliary device used to represent a CCU reset controller */
+struct spacemit_ccu_adev {
+ struct auxiliary_device adev;
+ struct regmap *regmap;
+};
+
+static inline struct spacemit_ccu_adev *
+to_spacemit_ccu_adev(struct auxiliary_device *adev)
+{
+ return container_of(adev, struct spacemit_ccu_adev, adev);
+}
+
+/* APBS register offsets */
+#define APBS_PLL1_SWCR1 0x100
+#define APBS_PLL1_SWCR2 0x104
+#define APBS_PLL1_SWCR3 0x108
+#define APBS_PLL2_SWCR1 0x118
+#define APBS_PLL2_SWCR2 0x11c
+#define APBS_PLL2_SWCR3 0x120
+#define APBS_PLL3_SWCR1 0x124
+#define APBS_PLL3_SWCR2 0x128
+#define APBS_PLL3_SWCR3 0x12c
+
+/* MPMU register offsets */
+#define MPMU_POSR 0x0010
+#define POSR_PLL1_LOCK BIT(27)
+#define POSR_PLL2_LOCK BIT(28)
+#define POSR_PLL3_LOCK BIT(29)
+#define MPMU_SUCCR 0x0014
+#define MPMU_ISCCR 0x0044
+#define MPMU_WDTPCR 0x0200
+#define MPMU_RIPCCR 0x0210
+#define MPMU_ACGR 0x1024
+#define MPMU_APBCSCR 0x1050
+#define MPMU_SUCCR_1 0x10b0
+
+/* APBC register offsets */
+#define APBC_UART1_CLK_RST 0x00
+#define APBC_UART2_CLK_RST 0x04
+#define APBC_GPIO_CLK_RST 0x08
+#define APBC_PWM0_CLK_RST 0x0c
+#define APBC_PWM1_CLK_RST 0x10
+#define APBC_PWM2_CLK_RST 0x14
+#define APBC_PWM3_CLK_RST 0x18
+#define APBC_TWSI8_CLK_RST 0x20
+#define APBC_UART3_CLK_RST 0x24
+#define APBC_RTC_CLK_RST 0x28
+#define APBC_TWSI0_CLK_RST 0x2c
+#define APBC_TWSI1_CLK_RST 0x30
+#define APBC_TIMERS1_CLK_RST 0x34
+#define APBC_TWSI2_CLK_RST 0x38
+#define APBC_AIB_CLK_RST 0x3c
+#define APBC_TWSI4_CLK_RST 0x40
+#define APBC_TIMERS2_CLK_RST 0x44
+#define APBC_ONEWIRE_CLK_RST 0x48
+#define APBC_TWSI5_CLK_RST 0x4c
+#define APBC_DRO_CLK_RST 0x58
+#define APBC_IR_CLK_RST 0x5c
+#define APBC_TWSI6_CLK_RST 0x60
+#define APBC_COUNTER_CLK_SEL 0x64
+#define APBC_TWSI7_CLK_RST 0x68
+#define APBC_TSEN_CLK_RST 0x6c
+#define APBC_UART4_CLK_RST 0x70
+#define APBC_UART5_CLK_RST 0x74
+#define APBC_UART6_CLK_RST 0x78
+#define APBC_SSP3_CLK_RST 0x7c
+#define APBC_SSPA0_CLK_RST 0x80
+#define APBC_SSPA1_CLK_RST 0x84
+#define APBC_IPC_AP2AUD_CLK_RST 0x90
+#define APBC_UART7_CLK_RST 0x94
+#define APBC_UART8_CLK_RST 0x98
+#define APBC_UART9_CLK_RST 0x9c
+#define APBC_CAN0_CLK_RST 0xa0
+#define APBC_PWM4_CLK_RST 0xa8
+#define APBC_PWM5_CLK_RST 0xac
+#define APBC_PWM6_CLK_RST 0xb0
+#define APBC_PWM7_CLK_RST 0xb4
+#define APBC_PWM8_CLK_RST 0xb8
+#define APBC_PWM9_CLK_RST 0xbc
+#define APBC_PWM10_CLK_RST 0xc0
+#define APBC_PWM11_CLK_RST 0xc4
+#define APBC_PWM12_CLK_RST 0xc8
+#define APBC_PWM13_CLK_RST 0xcc
+#define APBC_PWM14_CLK_RST 0xd0
+#define APBC_PWM15_CLK_RST 0xd4
+#define APBC_PWM16_CLK_RST 0xd8
+#define APBC_PWM17_CLK_RST 0xdc
+#define APBC_PWM18_CLK_RST 0xe0
+#define APBC_PWM19_CLK_RST 0xe4
+
+/* APMU register offsets */
+#define APMU_JPG_CLK_RES_CTRL 0x020
+#define APMU_CSI_CCIC2_CLK_RES_CTRL 0x024
+#define APMU_ISP_CLK_RES_CTRL 0x038
+#define APMU_LCD_CLK_RES_CTRL1 0x044
+#define APMU_LCD_SPI_CLK_RES_CTRL 0x048
+#define APMU_LCD_CLK_RES_CTRL2 0x04c
+#define APMU_CCIC_CLK_RES_CTRL 0x050
+#define APMU_SDH0_CLK_RES_CTRL 0x054
+#define APMU_SDH1_CLK_RES_CTRL 0x058
+#define APMU_USB_CLK_RES_CTRL 0x05c
+#define APMU_QSPI_CLK_RES_CTRL 0x060
+#define APMU_DMA_CLK_RES_CTRL 0x064
+#define APMU_AES_CLK_RES_CTRL 0x068
+#define APMU_VPU_CLK_RES_CTRL 0x0a4
+#define APMU_GPU_CLK_RES_CTRL 0x0cc
+#define APMU_SDH2_CLK_RES_CTRL 0x0e0
+#define APMU_PMUA_MC_CTRL 0x0e8
+#define APMU_PMU_CC2_AP 0x100
+#define APMU_PMUA_EM_CLK_RES_CTRL 0x104
+#define APMU_AUDIO_CLK_RES_CTRL 0x14c
+#define APMU_HDMI_CLK_RES_CTRL 0x1b8
+#define APMU_CCI550_CLK_CTRL 0x300
+#define APMU_ACLK_CLK_CTRL 0x388
+#define APMU_CPU_C0_CLK_CTRL 0x38C
+#define APMU_CPU_C1_CLK_CTRL 0x390
+#define APMU_PCIE_CLK_RES_CTRL_0 0x3cc
+#define APMU_PCIE_CLK_RES_CTRL_1 0x3d4
+#define APMU_PCIE_CLK_RES_CTRL_2 0x3dc
+#define APMU_EMAC0_CLK_RES_CTRL 0x3e4
+#define APMU_EMAC1_CLK_RES_CTRL 0x3ec
+
+/* RCPU register offsets */
+#define RCPU_SSP0_CLK_RST 0x0028
+#define RCPU_I2C0_CLK_RST 0x0030
+#define RCPU_UART1_CLK_RST 0x003c
+#define RCPU_CAN_CLK_RST 0x0048
+#define RCPU_IR_CLK_RST 0x004c
+#define RCPU_UART0_CLK_RST 0x00d8
+#define AUDIO_HDMI_CLK_CTRL 0x2044
+
+/* RCPU2 register offsets */
+#define RCPU2_PWM0_CLK_RST 0x0000
+#define RCPU2_PWM1_CLK_RST 0x0004
+#define RCPU2_PWM2_CLK_RST 0x0008
+#define RCPU2_PWM3_CLK_RST 0x000c
+#define RCPU2_PWM4_CLK_RST 0x0010
+#define RCPU2_PWM5_CLK_RST 0x0014
+#define RCPU2_PWM6_CLK_RST 0x0018
+#define RCPU2_PWM7_CLK_RST 0x001c
+#define RCPU2_PWM8_CLK_RST 0x0020
+#define RCPU2_PWM9_CLK_RST 0x0024
+
+/* APBC2 register offsets */
+#define APBC2_UART1_CLK_RST 0x0000
+#define APBC2_SSP2_CLK_RST 0x0004
+#define APBC2_TWSI3_CLK_RST 0x0008
+#define APBC2_RTC_CLK_RST 0x000c
+#define APBC2_TIMERS0_CLK_RST 0x0010
+#define APBC2_KPC_CLK_RST 0x0014
+#define APBC2_GPIO_CLK_RST 0x001c
+
+#endif /* __SOC_K1_SYSCON_H__ */
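The container_of() helper above is how the reset half of a SpacemiT CCU block reaches the regmap registered by its clock half. A hedged sketch of an auxiliary reset driver's probe (the function, and the choice of register and bit, are illustrative):

static int k1_reset_probe(struct auxiliary_device *adev,
			  const struct auxiliary_device_id *id)
{
	struct spacemit_ccu_adev *ccu = to_spacemit_ccu_adev(adev);

	/* Poke an (illustrative) reset bit through the shared regmap. */
	return regmap_set_bits(ccu->regmap, APBC_UART1_CLK_RST, BIT(2));
}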
diff --git a/include/trace/events/alarmtimer.h b/include/trace/events/alarmtimer.h
index 13483c7ca70b..8e9c76a7f21b 100644
--- a/include/trace/events/alarmtimer.h
+++ b/include/trace/events/alarmtimer.h
@@ -20,6 +20,7 @@ TRACE_DEFINE_ENUM(ALARM_BOOTTIME_FREEZER);
{ 1 << ALARM_REALTIME_FREEZER, "REALTIME Freezer" }, \
{ 1 << ALARM_BOOTTIME_FREEZER, "BOOTTIME Freezer" })
+#ifdef CONFIG_RTC_CLASS
TRACE_EVENT(alarmtimer_suspend,
TP_PROTO(ktime_t expires, int flag),
@@ -41,6 +42,7 @@ TRACE_EVENT(alarmtimer_suspend,
__entry->expires
)
);
+#endif /* CONFIG_RTC_CLASS */
DECLARE_EVENT_CLASS(alarm_class,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index d54fe354b390..7e418f065b94 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -687,7 +687,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( loff_t, range_start )
__field( loff_t, range_end )
__field( char, for_kupdate )
- __field( char, for_reclaim )
__field( char, range_cyclic )
__field( unsigned long, writeback_index )
__field( u64, root_objectid )
@@ -701,7 +700,6 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->range_start = wbc->range_start;
__entry->range_end = wbc->range_end;
__entry->for_kupdate = wbc->for_kupdate;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->writeback_index = inode->i_mapping->writeback_index;
__entry->root_objectid = btrfs_root_id(BTRFS_I(inode)->root);
@@ -710,13 +708,12 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu "
"nr_to_write=%ld pages_skipped=%ld range_start=%llu "
"range_end=%llu for_kupdate=%d "
- "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
+ "range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
__entry->range_start, __entry->range_end,
- __entry->for_kupdate,
- __entry->for_reclaim, __entry->range_cyclic,
+ __entry->for_kupdate, __entry->range_cyclic,
__entry->writeback_index)
);
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index 7d332387be6c..ba9229af9a34 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -257,53 +257,6 @@ DEFINE_EVENT(cgroup_rstat, cgroup_rstat_unlock,
TP_ARGS(cgrp, cpu, contended)
);
-/*
- * Related to per CPU locks:
- * global rstat_base_cpu_lock for base stats
- * cgroup_subsys::rstat_ss_cpu_lock for subsystem stats
- */
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_lock_contended_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_locked_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
-DEFINE_EVENT(cgroup_rstat, cgroup_rstat_cpu_unlock_fastpath,
-
- TP_PROTO(struct cgroup *cgrp, int cpu, bool contended),
-
- TP_ARGS(cgrp, cpu, contended)
-);
-
#endif /* _TRACE_CGROUP_H */
/* This part must be outside protection */
diff --git a/include/trace/events/damon.h b/include/trace/events/damon.h
index da4bd9fd1162..852d725afea2 100644
--- a/include/trace/events/damon.h
+++ b/include/trace/events/damon.h
@@ -9,6 +9,30 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+TRACE_EVENT(damos_esz,
+
+ TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
+ unsigned long esz),
+
+ TP_ARGS(context_idx, scheme_idx, esz),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, context_idx)
+ __field(unsigned int, scheme_idx)
+ __field(unsigned long, esz)
+ ),
+
+ TP_fast_assign(
+ __entry->context_idx = context_idx;
+ __entry->scheme_idx = scheme_idx;
+ __entry->esz = esz;
+ ),
+
+ TP_printk("ctx_idx=%u scheme_idx=%u esz=%lu",
+ __entry->context_idx, __entry->scheme_idx,
+ __entry->esz)
+);
+
TRACE_EVENT_CONDITION(damos_before_apply,
TP_PROTO(unsigned int context_idx, unsigned int scheme_idx,
@@ -48,6 +72,23 @@ TRACE_EVENT_CONDITION(damos_before_apply,
__entry->nr_accesses, __entry->age)
);
+TRACE_EVENT(damon_monitor_intervals_tune,
+
+ TP_PROTO(unsigned long sample_us),
+
+ TP_ARGS(sample_us),
+
+ TP_STRUCT__entry(
+ __field(unsigned long, sample_us)
+ ),
+
+ TP_fast_assign(
+ __entry->sample_us = sample_us;
+ ),
+
+ TP_printk("sample_us=%lu", __entry->sample_us)
+);
+
TRACE_EVENT(damon_aggregated,
TP_PROTO(unsigned int target_id, struct damon_region *r,
diff --git a/include/trace/events/dma_fence.h b/include/trace/events/dma_fence.h
index a4de3df8500b..4814a65b68dc 100644
--- a/include/trace/events/dma_fence.h
+++ b/include/trace/events/dma_fence.h
@@ -16,6 +16,36 @@ DECLARE_EVENT_CLASS(dma_fence,
TP_ARGS(fence),
TP_STRUCT__entry(
+ __string(driver, dma_fence_driver_name(fence))
+ __string(timeline, dma_fence_timeline_name(fence))
+ __field(unsigned int, context)
+ __field(unsigned int, seqno)
+ ),
+
+ TP_fast_assign(
+ __assign_str(driver);
+ __assign_str(timeline);
+ __entry->context = fence->context;
+ __entry->seqno = fence->seqno;
+ ),
+
+ TP_printk("driver=%s timeline=%s context=%u seqno=%u",
+ __get_str(driver), __get_str(timeline), __entry->context,
+ __entry->seqno)
+);
+
+/*
+ * Safe only for call sites which are guaranteed not to race with fence
+ * signaling: either holding fence->lock and having checked that the fence is
+ * not signaled, or the signaling path itself.
+ */
+DECLARE_EVENT_CLASS(dma_fence_unsignaled,
+
+ TP_PROTO(struct dma_fence *fence),
+
+ TP_ARGS(fence),
+
+ TP_STRUCT__entry(
__string(driver, fence->ops->get_driver_name(fence))
__string(timeline, fence->ops->get_timeline_name(fence))
__field(unsigned int, context)
@@ -34,14 +64,14 @@ DECLARE_EVENT_CLASS(dma_fence,
__entry->seqno)
);
-DEFINE_EVENT(dma_fence, dma_fence_emit,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_emit,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_init,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_init,
TP_PROTO(struct dma_fence *fence),
@@ -55,14 +85,14 @@ DEFINE_EVENT(dma_fence, dma_fence_destroy,
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_enable_signal,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_enable_signal,
TP_PROTO(struct dma_fence *fence),
TP_ARGS(fence)
);
-DEFINE_EVENT(dma_fence, dma_fence_signaled,
+DEFINE_EVENT(dma_fence_unsignaled, dma_fence_signaled,
TP_PROTO(struct dma_fence *fence),
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 6f9cf2811733..a374e7ea7e57 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -23,10 +23,7 @@ struct partial_cluster;
#define show_mballoc_flags(flags) __print_flags(flags, "|", \
{ EXT4_MB_HINT_MERGE, "HINT_MERGE" }, \
- { EXT4_MB_HINT_RESERVED, "HINT_RESV" }, \
- { EXT4_MB_HINT_METADATA, "HINT_MDATA" }, \
{ EXT4_MB_HINT_FIRST, "HINT_FIRST" }, \
- { EXT4_MB_HINT_BEST, "HINT_BEST" }, \
{ EXT4_MB_HINT_DATA, "HINT_DATA" }, \
{ EXT4_MB_HINT_NOPREALLOC, "HINT_NOPREALLOC" }, \
{ EXT4_MB_HINT_GROUP_ALLOC, "HINT_GRP_ALLOC" }, \
@@ -483,16 +480,17 @@ TRACE_EVENT(ext4_writepages,
(unsigned long) __entry->writeback_index)
);
-TRACE_EVENT(ext4_da_write_pages,
- TP_PROTO(struct inode *inode, pgoff_t first_page,
+TRACE_EVENT(ext4_da_write_folios_start,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
struct writeback_control *wbc),
- TP_ARGS(inode, first_page, wbc),
+ TP_ARGS(inode, start_pos, next_pos, wbc),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
- __field( pgoff_t, first_page )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
__field( long, nr_to_write )
__field( int, sync_mode )
),
@@ -500,18 +498,48 @@ TRACE_EVENT(ext4_da_write_pages,
TP_fast_assign(
__entry->dev = inode->i_sb->s_dev;
__entry->ino = inode->i_ino;
- __entry->first_page = first_page;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
__entry->nr_to_write = wbc->nr_to_write;
__entry->sync_mode = wbc->sync_mode;
),
- TP_printk("dev %d,%d ino %lu first_page %lu nr_to_write %ld "
- "sync_mode %d",
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld sync_mode %d",
MAJOR(__entry->dev), MINOR(__entry->dev),
- (unsigned long) __entry->ino, __entry->first_page,
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
__entry->nr_to_write, __entry->sync_mode)
);
+TRACE_EVENT(ext4_da_write_folios_end,
+ TP_PROTO(struct inode *inode, loff_t start_pos, loff_t next_pos,
+ struct writeback_control *wbc, int ret),
+
+ TP_ARGS(inode, start_pos, next_pos, wbc, ret),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( ino_t, ino )
+ __field( loff_t, start_pos )
+ __field( loff_t, next_pos )
+ __field( long, nr_to_write )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start_pos = start_pos;
+ __entry->next_pos = next_pos;
+ __entry->nr_to_write = wbc->nr_to_write;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev %d,%d ino %lu start_pos 0x%llx next_pos 0x%llx nr_to_write %ld ret %d",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ (unsigned long) __entry->ino, __entry->start_pos, __entry->next_pos,
+ __entry->nr_to_write, __entry->ret)
+);
+
TRACE_EVENT(ext4_da_write_pages_extent,
TP_PROTO(struct inode *inode, struct ext4_map_blocks *map),
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index 76b56f78abb0..50ebc1290ab0 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -15,7 +15,7 @@ DECLARE_EVENT_CLASS(dax_pmd_fault_class,
__field(unsigned long, ino)
__field(unsigned long, vm_start)
__field(unsigned long, vm_end)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(pgoff_t, max_pgoff)
@@ -67,7 +67,7 @@ DECLARE_EVENT_CLASS(dax_pmd_load_hole_class,
TP_ARGS(inode, vmf, zero_folio, radix_entry),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(struct folio *, zero_folio)
__field(void *, radix_entry)
@@ -107,7 +107,7 @@ DECLARE_EVENT_CLASS(dax_pte_fault_class,
TP_ARGS(inode, vmf, result),
TP_STRUCT__entry(
__field(unsigned long, ino)
- __field(unsigned long, vm_flags)
+ __field(vm_flags_t, vm_flags)
__field(unsigned long, address)
__field(pgoff_t, pgoff)
__field(dev_t, dev)
diff --git a/include/trace/events/ipi.h b/include/trace/events/ipi.h
index 3de9bfc982ce..9912f0ded81d 100644
--- a/include/trace/events/ipi.h
+++ b/include/trace/events/ipi.h
@@ -7,34 +7,6 @@
#include <linux/tracepoint.h>
-/**
- * ipi_raise - called when a smp cross call is made
- *
- * @mask: mask of recipient CPUs for the IPI
- * @reason: string identifying the IPI purpose
- *
- * It is necessary for @reason to be a static string declared with
- * __tracepoint_string.
- */
-TRACE_EVENT(ipi_raise,
-
- TP_PROTO(const struct cpumask *mask, const char *reason),
-
- TP_ARGS(mask, reason),
-
- TP_STRUCT__entry(
- __bitmask(target_cpus, nr_cpumask_bits)
- __field(const char *, reason)
- ),
-
- TP_fast_assign(
- __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
- __entry->reason = reason;
- ),
-
- TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
-);
-
TRACE_EVENT(ipi_send_cpu,
TP_PROTO(const unsigned int cpu, unsigned long callsite, void *callback),
@@ -79,6 +51,35 @@ TRACE_EVENT(ipi_send_cpumask,
__get_cpumask(cpumask), __entry->callsite, __entry->callback)
);
+#ifdef CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS
+/**
+ * ipi_raise - called when a smp cross call is made
+ *
+ * @mask: mask of recipient CPUs for the IPI
+ * @reason: string identifying the IPI purpose
+ *
+ * It is necessary for @reason to be a static string declared with
+ * __tracepoint_string.
+ */
+TRACE_EVENT(ipi_raise,
+
+ TP_PROTO(const struct cpumask *mask, const char *reason),
+
+ TP_ARGS(mask, reason),
+
+ TP_STRUCT__entry(
+ __bitmask(target_cpus, nr_cpumask_bits)
+ __field(const char *, reason)
+ ),
+
+ TP_fast_assign(
+ __assign_bitmask(target_cpus, cpumask_bits(mask), nr_cpumask_bits);
+ __entry->reason = reason;
+ ),
+
+ TP_printk("target_mask=%s (%s)", __get_bitmask(target_cpus), __entry->reason)
+);
+
DECLARE_EVENT_CLASS(ipi_handler,
TP_PROTO(const char *reason),
@@ -127,6 +128,7 @@ DEFINE_EVENT(ipi_handler, ipi_exit,
TP_ARGS(reason)
);
+#endif /* CONFIG_HAVE_EXTRA_IPI_TRACEPOINTS */
#endif /* _TRACE_IPI_H */
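Since @reason has to live in the __tracepoint_str section for decoders to resolve the pointer, callers conventionally wrap the literal in tracepoint_string(). A hedged sketch (notify_cpus() and the reason text are hypothetical):

static void notify_cpus(const struct cpumask *mask)
{
	trace_ipi_raise(mask, tracepoint_string("CUSTOM_IPI"));
	/* ...then actually send the IPI... */
}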
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index f74925a6cf69..474358773abe 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -304,44 +304,6 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
-TRACE_EVENT(mm_alloc_contig_migrate_range_info,
-
- TP_PROTO(unsigned long start,
- unsigned long end,
- unsigned long nr_migrated,
- unsigned long nr_reclaimed,
- unsigned long nr_mapped,
- int migratetype),
-
- TP_ARGS(start, end, nr_migrated, nr_reclaimed, nr_mapped, migratetype),
-
- TP_STRUCT__entry(
- __field(unsigned long, start)
- __field(unsigned long, end)
- __field(unsigned long, nr_migrated)
- __field(unsigned long, nr_reclaimed)
- __field(unsigned long, nr_mapped)
- __field(int, migratetype)
- ),
-
- TP_fast_assign(
- __entry->start = start;
- __entry->end = end;
- __entry->nr_migrated = nr_migrated;
- __entry->nr_reclaimed = nr_reclaimed;
- __entry->nr_mapped = nr_mapped;
- __entry->migratetype = migratetype;
- ),
-
- TP_printk("start=0x%lx end=0x%lx migratetype=%d nr_migrated=%lu nr_reclaimed=%lu nr_mapped=%lu",
- __entry->start,
- __entry->end,
- __entry->migratetype,
- __entry->nr_migrated,
- __entry->nr_reclaimed,
- __entry->nr_mapped)
-);
-
TRACE_EVENT(mm_setup_per_zone_wmarks,
TP_PROTO(struct zone *zone),
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index fc7d0f8ff078..8b7252b8d751 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -82,95 +82,15 @@ TRACE_EVENT(kvm_set_irq,
TP_printk("gsi %u level %d source %d",
__entry->gsi, __entry->level, __entry->irq_source_id)
);
-#endif /* defined(CONFIG_HAVE_KVM_IRQCHIP) */
-
-#if defined(__KVM_HAVE_IOAPIC)
-#define kvm_deliver_mode \
- {0x0, "Fixed"}, \
- {0x1, "LowPrio"}, \
- {0x2, "SMI"}, \
- {0x3, "Res3"}, \
- {0x4, "NMI"}, \
- {0x5, "INIT"}, \
- {0x6, "SIPI"}, \
- {0x7, "ExtINT"}
-
-TRACE_EVENT(kvm_ioapic_set_irq,
- TP_PROTO(__u64 e, int pin, bool coalesced),
- TP_ARGS(e, pin, coalesced),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- __field( int, pin )
- __field( bool, coalesced )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- __entry->pin = pin;
- __entry->coalesced = coalesced;
- ),
-
- TP_printk("pin %u dst %x vec %u (%s|%s|%s%s)%s",
- __entry->pin, (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "",
- __entry->coalesced ? " (coalesced)" : "")
-);
-TRACE_EVENT(kvm_ioapic_delayed_eoi_inj,
- TP_PROTO(__u64 e),
- TP_ARGS(e),
-
- TP_STRUCT__entry(
- __field( __u64, e )
- ),
-
- TP_fast_assign(
- __entry->e = e;
- ),
-
- TP_printk("dst %x vec %u (%s|%s|%s%s)",
- (u8)(__entry->e >> 56), (u8)__entry->e,
- __print_symbolic((__entry->e >> 8 & 0x7), kvm_deliver_mode),
- (__entry->e & (1<<11)) ? "logical" : "physical",
- (__entry->e & (1<<15)) ? "level" : "edge",
- (__entry->e & (1<<16)) ? "|masked" : "")
-);
-
-TRACE_EVENT(kvm_msi_set_irq,
- TP_PROTO(__u64 address, __u64 data),
- TP_ARGS(address, data),
-
- TP_STRUCT__entry(
- __field( __u64, address )
- __field( __u64, data )
- ),
-
- TP_fast_assign(
- __entry->address = address;
- __entry->data = data;
- ),
-
- TP_printk("dst %llx vec %u (%s|%s|%s%s)",
- (u8)(__entry->address >> 12) | ((__entry->address >> 32) & 0xffffff00),
- (u8)__entry->data,
- __print_symbolic((__entry->data >> 8 & 0x7), kvm_deliver_mode),
- (__entry->address & (1<<2)) ? "logical" : "physical",
- (__entry->data & (1<<15)) ? "level" : "edge",
- (__entry->address & (1<<3)) ? "|rh" : "")
-);
+#ifdef CONFIG_KVM_IOAPIC
#define kvm_irqchips \
{KVM_IRQCHIP_PIC_MASTER, "PIC master"}, \
{KVM_IRQCHIP_PIC_SLAVE, "PIC slave"}, \
{KVM_IRQCHIP_IOAPIC, "IOAPIC"}
-#endif /* defined(__KVM_HAVE_IOAPIC) */
-
-#if defined(CONFIG_HAVE_KVM_IRQCHIP)
+#endif /* CONFIG_KVM_IOAPIC */
#ifdef kvm_irqchips
#define kvm_ack_irq_string "irqchip %s pin %u"
@@ -473,6 +393,33 @@ TRACE_EVENT(kvm_dirty_ring_exit,
TP_printk("vcpu %d", __entry->vcpu_id)
);
+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+/*
+ * @start: Starting address of guest memory range
+ * @end: End address of guest memory range
+ * @attr: The value of the attribute being set.
+ */
+TRACE_EVENT(kvm_vm_set_mem_attributes,
+ TP_PROTO(gfn_t start, gfn_t end, unsigned long attr),
+ TP_ARGS(start, end, attr),
+
+ TP_STRUCT__entry(
+ __field(gfn_t, start)
+ __field(gfn_t, end)
+ __field(unsigned long, attr)
+ ),
+
+ TP_fast_assign(
+ __entry->start = start;
+ __entry->end = end;
+ __entry->attr = attr;
+ ),
+
+ TP_printk("%#016llx -- %#016llx [0x%lx]",
+ __entry->start, __entry->end, __entry->attr)
+);
+#endif /* CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES */
+
TRACE_EVENT(kvm_unmap_hva_range,
TP_PROTO(unsigned long start, unsigned long end),
TP_ARGS(start, end),
diff --git a/include/trace/events/mmap.h b/include/trace/events/mmap.h
index f8d61485de16..ee2843a5daef 100644
--- a/include/trace/events/mmap.h
+++ b/include/trace/events/mmap.h
@@ -43,58 +43,6 @@ TRACE_EVENT(vm_unmapped_area,
__entry->align_offset)
);
-TRACE_EVENT(vma_mas_szero,
- TP_PROTO(struct maple_tree *mt, unsigned long start,
- unsigned long end),
-
- TP_ARGS(mt, start, end),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(unsigned long, start)
- __field(unsigned long, end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->start = start;
- __entry->end = end;
- ),
-
- TP_printk("mt_mod %p, (NULL), SNULL, %lu, %lu,",
- __entry->mt,
- (unsigned long) __entry->start,
- (unsigned long) __entry->end
- )
-);
-
-TRACE_EVENT(vma_store,
- TP_PROTO(struct maple_tree *mt, struct vm_area_struct *vma),
-
- TP_ARGS(mt, vma),
-
- TP_STRUCT__entry(
- __field(struct maple_tree *, mt)
- __field(struct vm_area_struct *, vma)
- __field(unsigned long, vm_start)
- __field(unsigned long, vm_end)
- ),
-
- TP_fast_assign(
- __entry->mt = mt;
- __entry->vma = vma;
- __entry->vm_start = vma->vm_start;
- __entry->vm_end = vma->vm_end - 1;
- ),
-
- TP_printk("mt_mod %p, (%p), STORE, %lu, %lu,",
- __entry->mt, __entry->vma,
- (unsigned long) __entry->vm_start,
- (unsigned long) __entry->vm_end
- )
-);
-
-
TRACE_EVENT(exit_mmap,
TP_PROTO(struct mm_struct *mm),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 6c631eec23e3..82904291c2b8 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -62,6 +62,7 @@ TRACE_EVENT(cpu_idle_miss,
(unsigned long)__entry->state, (__entry->below)?"below":"above")
);
+#ifdef CONFIG_ARM_PSCI_CPUIDLE
DECLARE_EVENT_CLASS(psci_domain_idle,
TP_PROTO(unsigned int cpu_id, unsigned int state, bool s2idle),
@@ -98,28 +99,7 @@ DEFINE_EVENT(psci_domain_idle, psci_domain_idle_exit,
TP_ARGS(cpu_id, state, s2idle)
);
-
-TRACE_EVENT(powernv_throttle,
-
- TP_PROTO(int chip_id, const char *reason, int pmax),
-
- TP_ARGS(chip_id, reason, pmax),
-
- TP_STRUCT__entry(
- __field(int, chip_id)
- __string(reason, reason)
- __field(int, pmax)
- ),
-
- TP_fast_assign(
- __entry->chip_id = chip_id;
- __assign_str(reason);
- __entry->pmax = pmax;
- ),
-
- TP_printk("Chip %d Pmax %d %s", __entry->chip_id,
- __entry->pmax, __get_str(reason))
-);
+#endif /* CONFIG_ARM_PSCI_CPUIDLE */
TRACE_EVENT(pstate_sample,
@@ -232,6 +212,7 @@ TRACE_EVENT(cpu_frequency_limits,
(unsigned long)__entry->cpu_id)
);
+#ifdef CONFIG_PM_SLEEP
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
@@ -280,6 +261,7 @@ TRACE_EVENT(device_pm_callback_end,
TP_printk("%s %s, err=%d",
__get_str(driver), __get_str(device), __entry->error)
);
+#endif /* CONFIG_PM_SLEEP */
TRACE_EVENT(suspend_resume,
@@ -337,6 +319,7 @@ DEFINE_EVENT(wakeup_source, wakeup_source_deactivate,
TP_ARGS(name, state)
);
+#ifdef CONFIG_ARCH_OMAP2PLUS
/*
* The power domain events are used for power domains transitions
*/
@@ -368,6 +351,7 @@ DEFINE_EVENT(power_domain, power_domain_target,
TP_ARGS(name, state, cpu_id)
);
+#endif /* CONFIG_ARCH_OMAP2PLUS */
/*
* CPU latency QoS events used for global CPU latency QoS list updates
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 4e6b2910cec3..9f16ee2603ba 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -628,6 +628,7 @@ TRACE_EVENT(sched_process_hang,
);
#endif /* CONFIG_DETECT_HUNG_TASK */
+#ifdef CONFIG_NUMA_BALANCING
/*
* Tracks migration of tasks from one runqueue to another. Can be used to
* detect if automatic NUMA balancing is bouncing between nodes.
@@ -720,7 +721,6 @@ DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
);
-#ifdef CONFIG_NUMA_BALANCING
#define NUMAB_SKIP_REASON \
EM( NUMAB_SKIP_UNSUITABLE, "unsuitable" ) \
EM( NUMAB_SKIP_SHARED_RO, "shared_ro" ) \
@@ -882,18 +882,22 @@ DECLARE_TRACE(sched_compute_energy,
TP_ARGS(p, dst_cpu, energy, max_util, busy_time));
DECLARE_TRACE(sched_entry,
- TP_PROTO(bool preempt, unsigned long ip),
- TP_ARGS(preempt, ip));
+ TP_PROTO(bool preempt),
+ TP_ARGS(preempt));
DECLARE_TRACE(sched_exit,
- TP_PROTO(bool is_switch, unsigned long ip),
- TP_ARGS(is_switch, ip));
+ TP_PROTO(bool is_switch),
+ TP_ARGS(is_switch));
DECLARE_TRACE_CONDITION(sched_set_state,
TP_PROTO(struct task_struct *tsk, int state),
TP_ARGS(tsk, state),
TP_CONDITION(!!(tsk->__state) != !!state));
+DECLARE_TRACE(sched_set_need_resched,
+ TP_PROTO(struct task_struct *tsk, int cpu, int tif),
+ TP_ARGS(tsk, cpu, tif));
+
#endif /* _TRACE_SCHED_H */
/* This part must be outside protection */
diff --git a/include/trace/events/scsi.h b/include/trace/events/scsi.h
index bf6cc98d9122..c36c72ab7f2b 100644
--- a/include/trace/events/scsi.h
+++ b/include/trace/events/scsi.h
@@ -200,6 +200,14 @@ TRACE_EVENT(scsi_dispatch_cmd_start,
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len))
);
+#define scsi_rtn_name(result) { result, #result }
+#define show_rtn_name(val) \
+ __print_symbolic(val, \
+ scsi_rtn_name(SCSI_MLQUEUE_HOST_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_DEVICE_BUSY), \
+ scsi_rtn_name(SCSI_MLQUEUE_EH_RETRY), \
+ scsi_rtn_name(SCSI_MLQUEUE_TARGET_BUSY))
+
TRACE_EVENT(scsi_dispatch_cmd_error,
TP_PROTO(struct scsi_cmnd *cmd, int rtn),
@@ -240,14 +248,15 @@ TRACE_EVENT(scsi_dispatch_cmd_error,
TP_printk("host_no=%u channel=%u id=%u lun=%u data_sgl=%u prot_sgl=%u" \
" prot_op=%s driver_tag=%d scheduler_tag=%d cmnd=(%s %s raw=%s)" \
- " rtn=%d",
+ " rtn=%s",
__entry->host_no, __entry->channel, __entry->id,
__entry->lun, __entry->data_sglen, __entry->prot_sglen,
show_prot_op_name(__entry->prot_op), __entry->driver_tag,
__entry->scheduler_tag, show_opcode_name(__entry->opcode),
__parse_cdb(__get_dynamic_array(cmnd), __entry->cmd_len),
__print_hex(__get_dynamic_array(cmnd), __entry->cmd_len),
- __entry->rtn)
+ show_rtn_name(__entry->rtn)
+ )
);
DECLARE_EVENT_CLASS(scsi_cmd_done_timeout_template,
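
The scsi_rtn_name()/show_rtn_name() pair above leans on the preprocessor stringizing operator, so each SCSI_MLQUEUE_* value and its printed name cannot drift apart; the rtn field of scsi_dispatch_cmd_error now decodes to a symbolic string rather than a bare integer. A minimal user-space analogue of the same pattern, with the value copied from scsi/scsi.h purely for illustration:

#include <stdio.h>

#define SCSI_MLQUEUE_HOST_BUSY 0x1055	/* illustrative copy of the kernel value */
#define rtn_name(result) { result, #result }

struct sym { int val; const char *name; };

int main(void)
{
	static const struct sym tab[] = { rtn_name(SCSI_MLQUEUE_HOST_BUSY) };

	/* prints: 0x1055 -> SCSI_MLQUEUE_HOST_BUSY */
	printf("%#x -> %s\n", tab[0].val, tab[0].name);
	return 0;
}
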
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index f50048af5fcc..c8fe879d5828 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#ifdef CONFIG_PPC_BOOK3S_64
DECLARE_EVENT_CLASS(hugepage_set,
TP_PROTO(unsigned long addr, unsigned long pte),
@@ -66,6 +67,7 @@ DEFINE_EVENT(hugepage_update, hugepage_update_pud,
TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
TP_ARGS(addr, pud, clr, set)
);
+#endif /* CONFIG_PPC_BOOK3S_64 */
DECLARE_EVENT_CLASS(migration_pmd,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 0ff388131fc9..1e23919c0da9 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -459,7 +459,6 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, sync_mode)
__field(int, for_kupdate)
__field(int, for_background)
- __field(int, for_reclaim)
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
@@ -473,23 +472,20 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->sync_mode = wbc->sync_mode;
__entry->for_kupdate = wbc->for_kupdate;
__entry->for_background = wbc->for_background;
- __entry->for_reclaim = wbc->for_reclaim;
__entry->range_cyclic = wbc->range_cyclic;
__entry->range_start = (long)wbc->range_start;
__entry->range_end = (long)wbc->range_end;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
- "bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup_ino=%lu",
+ TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d bgrd=%d "
+ "cyclic=%d start=0x%lx end=0x%lx cgroup_ino=%lu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
__entry->sync_mode,
__entry->for_kupdate,
__entry->for_background,
- __entry->for_reclaim,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 45c4fa13499c..bdedbaccf776 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -1493,6 +1493,8 @@ struct drm_amdgpu_info_hw_ip {
__u32 available_rings;
/** version info: bits 23:16 major, 15:8 minor, 7:0 revision */
__u32 ip_discovery_version;
+ /* Userq available slots */
+ __u32 userq_num_slots;
};
/* GFX metadata BO sizes and alignment info (in bytes) */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 81202a50dc9e..ea91aa8afde9 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -210,6 +210,10 @@ extern "C" {
#define DRM_FORMAT_RGBA1010102 fourcc_code('R', 'A', '3', '0') /* [31:0] R:G:B:A 10:10:10:2 little endian */
#define DRM_FORMAT_BGRA1010102 fourcc_code('B', 'A', '3', '0') /* [31:0] B:G:R:A 10:10:10:2 little endian */
+/* 48 bpp RGB */
+#define DRM_FORMAT_RGB161616 fourcc_code('R', 'G', '4', '8') /* [47:0] R:G:B 16:16:16 little endian */
+#define DRM_FORMAT_BGR161616 fourcc_code('B', 'G', '4', '8') /* [47:0] B:G:R 16:16:16 little endian */
+
/* 64 bpp RGB */
#define DRM_FORMAT_XRGB16161616 fourcc_code('X', 'R', '4', '8') /* [63:0] x:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_XBGR16161616 fourcc_code('X', 'B', '4', '8') /* [63:0] x:B:G:R 16:16:16:16 little endian */
@@ -218,7 +222,7 @@ extern "C" {
#define DRM_FORMAT_ABGR16161616 fourcc_code('A', 'B', '4', '8') /* [63:0] A:B:G:R 16:16:16:16 little endian */
/*
- * Floating point 64bpp RGB
+ * Half-precision floating point - 16b/component
* IEEE 754-2008 binary16 half-precision float
* [15:0] sign:exponent:mantissa 1:5:10
*/
@@ -228,6 +232,20 @@ extern "C" {
#define DRM_FORMAT_ARGB16161616F fourcc_code('A', 'R', '4', 'H') /* [63:0] A:R:G:B 16:16:16:16 little endian */
#define DRM_FORMAT_ABGR16161616F fourcc_code('A', 'B', '4', 'H') /* [63:0] A:B:G:R 16:16:16:16 little endian */
+#define DRM_FORMAT_R16F fourcc_code('R', ' ', ' ', 'H') /* [15:0] R 16 little endian */
+#define DRM_FORMAT_GR1616F fourcc_code('G', 'R', ' ', 'H') /* [31:0] G:R 16:16 little endian */
+#define DRM_FORMAT_BGR161616F fourcc_code('B', 'G', 'R', 'H') /* [47:0] B:G:R 16:16:16 little endian */
+
+/*
+ * Floating point - 32b/component
+ * IEEE 754-2008 binary32 float
+ * [31:0] sign:exponent:mantissa 1:8:23
+ */
+#define DRM_FORMAT_R32F fourcc_code('R', ' ', ' ', 'F') /* [31:0] R 32 little endian */
+#define DRM_FORMAT_GR3232F fourcc_code('G', 'R', ' ', 'F') /* [63:0] R:G 32:32 little endian */
+#define DRM_FORMAT_BGR323232F fourcc_code('B', 'G', 'R', 'F') /* [95:0] R:G:B 32:32:32 little endian */
+#define DRM_FORMAT_ABGR32323232F fourcc_code('A', 'B', '8', 'F') /* [127:0] R:G:B:A 32:32:32:32 little endian */
+
/*
* RGBA format with 10-bit components packed in 64-bit per pixel, with 6 bits
* of unused padding per component:
@@ -378,6 +396,42 @@ extern "C" {
#define DRM_FORMAT_Q401 fourcc_code('Q', '4', '0', '1')
/*
+ * 3 plane YCbCr LSB aligned
+ * In order to use these formats in a similar fashion to MSB aligned ones,
+ * an implementation can multiply the values by 2^6=64. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [6:10] little endian
+ * index 1 = Cr plane, [15:0] z:Cr [6:10] little endian
+ * index 2 = Cb plane, [15:0] z:Cb [6:10] little endian
+ */
+#define DRM_FORMAT_S010 fourcc_code('S', '0', '1', '0') /* 2x2 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+#define DRM_FORMAT_S210 fourcc_code('S', '2', '1', '0') /* 2x1 subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+#define DRM_FORMAT_S410 fourcc_code('S', '4', '1', '0') /* non-subsampled Cb (1) and Cr (2) planes 10 bits per channel */
+
+/*
+ * 3 plane YCbCr LSB aligned
+ * In order to use these formats in a similar fashion to MSB aligned ones,
+ * an implementation can multiply the values by 2^4=16. For that reason the padding
+ * must only contain zeros.
+ * index 0 = Y plane, [15:0] z:Y [4:12] little endian
+ * index 1 = Cr plane, [15:0] z:Cr [4:12] little endian
+ * index 2 = Cb plane, [15:0] z:Cb [4:12] little endian
+ */
+#define DRM_FORMAT_S012 fourcc_code('S', '0', '1', '2') /* 2x2 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+#define DRM_FORMAT_S212 fourcc_code('S', '2', '1', '2') /* 2x1 subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+#define DRM_FORMAT_S412 fourcc_code('S', '4', '1', '2') /* non-subsampled Cb (1) and Cr (2) planes 12 bits per channel */
+
+/*
+ * 3 plane YCbCr
+ * index 0 = Y plane, [15:0] Y little endian
+ * index 1 = Cr plane, [15:0] Cr little endian
+ * index 2 = Cb plane, [15:0] Cb little endian
+ */
+#define DRM_FORMAT_S016 fourcc_code('S', '0', '1', '6') /* 2x2 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+#define DRM_FORMAT_S216 fourcc_code('S', '2', '1', '6') /* 2x1 subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+#define DRM_FORMAT_S416 fourcc_code('S', '4', '1', '6') /* non-subsampled Cb (1) and Cr (2) planes 16 bits per channel */
+
+/*
* 3 plane YCbCr
* index 0: Y plane, [7:0] Y
* index 1: Cb plane, [7:0] Cb
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 2f24103f4533..160ee1411d4a 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -445,6 +445,9 @@ struct drm_ivpu_metric_streamer_get_data {
__u64 data_size;
};
+/* Command queue flags */
+#define DRM_IVPU_CMDQ_FLAG_TURBO 0x00000001
+
/**
* struct drm_ivpu_cmdq_create - Create command queue for job submission
*/
@@ -462,6 +465,17 @@ struct drm_ivpu_cmdq_create {
* %DRM_IVPU_JOB_PRIORITY_REALTIME
*/
__u32 priority;
+ /**
+ * @flags:
+ *
+ * Supported flags:
+ *
+ * %DRM_IVPU_CMDQ_FLAG_TURBO
+ *
+ * Enable low-latency mode for the command queue. The NPU will maximize performance
+ * when executing jobs from such a queue, at the cost of increased power usage.
+ */
+ __u32 flags;
};
/**
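
A hedged sketch of requesting a turbo queue with the new flag; the ioctl name and the priority constant are assumed from the surrounding ivpu UAPI rather than shown in this hunk:

#include <string.h>
#include <sys/ioctl.h>
#include "drm/ivpu_accel.h"

int ivpu_create_turbo_cmdq(int fd, struct drm_ivpu_cmdq_create *args)
{
	memset(args, 0, sizeof(*args));
	args->priority = DRM_IVPU_JOB_PRIORITY_DEFAULT;	/* assumed constant */
	args->flags = DRM_IVPU_CMDQ_FLAG_TURBO;		/* new in this diff */
	return ioctl(fd, DRM_IOCTL_IVPU_CMDQ_CREATE, args); /* assumed ioctl name */
}
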
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 2342cb90857e..5c67294edc95 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -91,6 +91,32 @@ struct drm_msm_timespec {
#define MSM_PARAM_UBWC_SWIZZLE 0x12 /* RO */
#define MSM_PARAM_MACROTILE_MODE 0x13 /* RO */
#define MSM_PARAM_UCHE_TRAP_BASE 0x14 /* RO */
+/* PRR (Partially Resident Region) is required for sparse residency: */
+#define MSM_PARAM_HAS_PRR 0x15 /* RO */
+/* MSM_PARAM_EN_VM_BIND is set to 1 to enable VM_BIND ops.
+ *
+ * With VM_BIND enabled, userspace is required to allocate iova and use the
+ * VM_BIND ops for map/unmap ioctls. MSM_INFO_SET_IOVA and MSM_INFO_GET_IOVA
+ * will be rejected. (The latter does not have a sensible meaning when a BO
+ * can have multiple and/or partial mappings.)
+ *
+ * With VM_BIND enabled, userspace does not include a submit_bo table in the
+ * SUBMIT ioctl (this will be rejected); the resident set is determined by
+ * the VM_BIND ops.
+ *
+ * Enabling VM_BIND will fail on devices which do not have per-process pgtables.
+ * And it is not allowed to disable VM_BIND once it has been enabled.
+ *
+ * Enabling VM_BIND should be done (attempted) prior to allocating any BOs or
+ * submitqueues of type MSM_SUBMITQUEUE_VM_BIND.
+ *
+ * Relatedly, when VM_BIND mode is enabled, the kernel will not try to recover
+ * from GPU faults or failed async VM_BIND ops, in particular because it is
+ * difficult to communicate to userspace which op failed so that userspace
+ * could rewind and try again. When the VM is marked unusable, the SUBMIT
+ * ioctl will throw -EPIPE.
+ */
+#define MSM_PARAM_EN_VM_BIND 0x16 /* WO, once */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -114,6 +140,19 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+/* Private buffers do not need to be explicitly listed in the SUBMIT
+ * ioctl, unless referenced by a drm_msm_gem_submit_cmd. Private
+ * buffers may NOT be imported/exported or used for scanout (or any
+ * other situation where buffers can be indefinitely pinned, but
+ * cases other than scanout are all kernel owned BOs which are not
+ * visible to userspace).
+ *
+ * In exchange for those constraints, all private BOs associated with
+ * a single context (drm_file) share a single dma_resv, and if there
+ * has been no eviction since the last submit, there is no per-BO
+ * bookkeeping to do, significantly cutting the SUBMIT overhead.
+ */
+#define MSM_BO_NO_SHARE 0x00000004
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -123,6 +162,7 @@ struct drm_msm_param {
#define MSM_BO_FLAGS (MSM_BO_SCANOUT | \
MSM_BO_GPU_READONLY | \
+ MSM_BO_NO_SHARE | \
MSM_BO_CACHE_MASK)
struct drm_msm_gem_new {
@@ -180,6 +220,17 @@ struct drm_msm_gem_cpu_fini {
* Cmdstream Submission:
*/
+#define MSM_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
+#define MSM_SYNCOBJ_FLAGS ( \
+ MSM_SYNCOBJ_RESET | \
+ 0)
+
+struct drm_msm_syncobj {
+ __u32 handle; /* in, syncobj handle. */
+ __u32 flags; /* in, from MSM_SYNCOBJ_FLAGS */
+ __u64 point; /* in, timepoint for timeline syncobjs. */
+};
+
/* The value written into the cmdstream is logically:
*
* ((relocbuf->gpuaddr + reloc_offset) << shift) | or
@@ -221,7 +272,10 @@ struct drm_msm_gem_submit_cmd {
__u32 size; /* in, cmdstream size */
__u32 pad;
__u32 nr_relocs; /* in, number of submit_reloc's */
- __u64 relocs; /* in, ptr to array of submit_reloc's */
+ union {
+ __u64 relocs; /* in, ptr to array of submit_reloc's */
+ __u64 iova; /* cmdstream address (for VM_BIND contexts) */
+ };
};
/* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -269,17 +323,6 @@ struct drm_msm_gem_submit_bo {
MSM_SUBMIT_FENCE_SN_IN | \
0)
-#define MSM_SUBMIT_SYNCOBJ_RESET 0x00000001 /* Reset syncobj after wait. */
-#define MSM_SUBMIT_SYNCOBJ_FLAGS ( \
- MSM_SUBMIT_SYNCOBJ_RESET | \
- 0)
-
-struct drm_msm_gem_submit_syncobj {
- __u32 handle; /* in, syncobj handle. */
- __u32 flags; /* in, from MSM_SUBMIT_SYNCOBJ_FLAGS */
- __u64 point; /* in, timepoint for timeline syncobjs. */
-};
-
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
@@ -293,13 +336,80 @@ struct drm_msm_gem_submit {
__u64 cmds; /* in, ptr to array of submit_cmd's */
__s32 fence_fd; /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
__u32 queueid; /* in, submitqueue id */
- __u64 in_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
- __u64 out_syncobjs; /* in, ptr to array of drm_msm_gem_submit_syncobj */
+ __u64 in_syncobjs; /* in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs; /* in, ptr to array of drm_msm_syncobj */
__u32 nr_in_syncobjs; /* in, number of entries in in_syncobj */
__u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */
__u32 syncobj_stride; /* in, stride of syncobj arrays. */
__u32 pad; /*in, reserved for future use, always 0. */
+};
+#define MSM_VM_BIND_OP_UNMAP 0
+#define MSM_VM_BIND_OP_MAP 1
+#define MSM_VM_BIND_OP_MAP_NULL 2
+
+#define MSM_VM_BIND_OP_DUMP 1
+#define MSM_VM_BIND_OP_FLAGS ( \
+ MSM_VM_BIND_OP_DUMP | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind_op - bind/unbind op to run
+ */
+struct drm_msm_vm_bind_op {
+ /** @op: one of MSM_VM_BIND_OP_x */
+ __u32 op;
+ /** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */
+ __u32 handle;
+ /** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */
+ __u64 obj_offset;
+ /** @iova: Address to operate on */
+ __u64 iova;
+ * @range: Number of bytes to map/unmap
+ __u64 range;
+ /** @flags: Bitmask of MSM_VM_BIND_OP_FLAG_x */
+ __u32 flags;
+ /** @pad: MBZ */
+ __u32 pad;
+};
+
+#define MSM_VM_BIND_FENCE_FD_IN 0x00000001
+#define MSM_VM_BIND_FENCE_FD_OUT 0x00000002
+#define MSM_VM_BIND_FLAGS ( \
+ MSM_VM_BIND_FENCE_FD_IN | \
+ MSM_VM_BIND_FENCE_FD_OUT | \
+ 0)
+
+/**
+ * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND
+ */
+struct drm_msm_vm_bind {
+ /** @flags: in, bitmask of MSM_VM_BIND_x */
+ __u32 flags;
+ /** @nr_ops: the number of bind ops in this ioctl */
+ __u32 nr_ops;
+ /** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */
+ __s32 fence_fd;
+ /** @queue_id: in, submitqueue id */
+ __u32 queue_id;
+ /** @in_syncobjs: in, ptr to array of drm_msm_syncobj */
+ __u64 in_syncobjs;
+ /** @out_syncobjs: in, ptr to array of drm_msm_syncobj */
+ __u64 out_syncobjs;
+ /** @nr_in_syncobjs: in, number of entries in in_syncobj */
+ __u32 nr_in_syncobjs;
+ /** @nr_out_syncobjs: in, number of entries in out_syncobj */
+ __u32 nr_out_syncobjs;
+ /** @syncobj_stride: in, stride of syncobj arrays */
+ __u32 syncobj_stride;
+ /** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */
+ __u32 op_stride;
+ union {
+ /** @op: used if nr_ops == 1 */
+ struct drm_msm_vm_bind_op op;
+ /** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */
+ __u64 ops;
+ };
};
#define MSM_WAIT_FENCE_BOOST 0x00000001
@@ -345,12 +455,19 @@ struct drm_msm_gem_madvise {
/*
* Draw queues allow the user to set specific submission parameter. Command
* submissions specify a specific submitqueue to use. ID 0 is reserved for
- * backwards compatibility as a "default" submitqueue
+ * backwards compatibility as a "default" submitqueue.
+ *
+ * Because VM_BIND async updates happen on the CPU, they must run on a
+ * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND. If we had
+ * a way to do pgtable updates on the GPU, we could drop this restriction.
*/
#define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001
+#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */
+
#define MSM_SUBMITQUEUE_FLAGS ( \
MSM_SUBMITQUEUE_ALLOW_PREEMPT | \
+ MSM_SUBMITQUEUE_VM_BIND | \
0)
/*
@@ -388,6 +505,7 @@ struct drm_msm_submitqueue_query {
#define DRM_MSM_SUBMITQUEUE_NEW 0x0A
#define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B
#define DRM_MSM_SUBMITQUEUE_QUERY 0x0C
+#define DRM_MSM_VM_BIND 0x0D
#define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param)
#define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param)
@@ -401,6 +519,7 @@ struct drm_msm_submitqueue_query {
#define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue)
#define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32)
#define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query)
+#define DRM_IOCTL_MSM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind)
#if defined(__cplusplus)
}
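
Putting the msm additions together, a rough sketch of a single synchronous MAP through the new DRM_IOCTL_MSM_VM_BIND, using the inline-op form for nr_ops == 1; the fd, queue, handle and addresses are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include "drm/msm_drm.h"

int msm_bind_one(int fd, __u32 queue_id, __u32 handle, __u64 iova, __u64 range)
{
	struct drm_msm_vm_bind req;

	memset(&req, 0, sizeof(req));
	req.nr_ops = 1;			/* inline @op form */
	req.queue_id = queue_id;	/* MSM_SUBMITQUEUE_VM_BIND queue */
	req.op_stride = sizeof(req.op);	/* assumed required even for the inline form */
	req.op.op = MSM_VM_BIND_OP_MAP;
	req.op.handle = handle;
	req.op.iova = iova;
	req.op.range = range;
	return ioctl(fd, DRM_IOCTL_MSM_VM_BIND, &req);
}
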
diff --git a/include/uapi/drm/panfrost_drm.h b/include/uapi/drm/panfrost_drm.h
index 568724be6628..ed67510395bd 100644
--- a/include/uapi/drm/panfrost_drm.h
+++ b/include/uapi/drm/panfrost_drm.h
@@ -21,6 +21,7 @@ extern "C" {
#define DRM_PANFROST_PERFCNT_ENABLE 0x06
#define DRM_PANFROST_PERFCNT_DUMP 0x07
#define DRM_PANFROST_MADVISE 0x08
+#define DRM_PANFROST_SET_LABEL_BO 0x09
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
@@ -29,6 +30,7 @@ extern "C" {
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define DRM_IOCTL_PANFROST_MADVISE DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MADVISE, struct drm_panfrost_madvise)
+#define DRM_IOCTL_PANFROST_SET_LABEL_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_SET_LABEL_BO, struct drm_panfrost_set_label_bo)
/*
* Unstable ioctl(s): only exposed when the unsafe unstable_ioctls module
@@ -227,6 +229,25 @@ struct drm_panfrost_madvise {
__u32 retained; /* out, whether backing store still exists */
};
+/**
+ * struct drm_panfrost_set_label_bo - ioctl argument for labelling Panfrost BOs.
+ */
+struct drm_panfrost_set_label_bo {
+ /** @handle: Handle of the buffer object to label. */
+ __u32 handle;
+
+ /** @pad: MBZ. */
+ __u32 pad;
+
+ /**
+ * @label: User pointer to a NUL-terminated string
+ *
+ * Length cannot be greater than 4096.
+ * NULL is permitted and means clear the label.
+ */
+ __u64 label;
+};
+
/* Definitions for coredump decoding in user space */
#define PANFROSTDUMP_MAJOR 1
#define PANFROSTDUMP_MINOR 0
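
A minimal sketch of the new label ioctl; the handle and label text are illustrative, while the struct and ioctl number come from the hunks above:

#include <stdint.h>
#include <sys/ioctl.h>
#include "drm/panfrost_drm.h"

int panfrost_label_bo(int fd, __u32 handle, const char *name)
{
	struct drm_panfrost_set_label_bo args = {
		.handle = handle,
		/* NUL-terminated, at most 4096 bytes; NULL clears the label */
		.label = (__u64)(uintptr_t)name,
	};

	return ioctl(fd, DRM_IOCTL_PANFROST_SET_LABEL_BO, &args);
}
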
diff --git a/include/uapi/drm/panthor_drm.h b/include/uapi/drm/panthor_drm.h
index ad9a70afea6c..e1f43deb7eca 100644
--- a/include/uapi/drm/panthor_drm.h
+++ b/include/uapi/drm/panthor_drm.h
@@ -130,6 +130,20 @@ enum drm_panthor_ioctl_id {
/** @DRM_PANTHOR_BO_SET_LABEL: Label a BO. */
DRM_PANTHOR_BO_SET_LABEL,
+
+ /**
+ * @DRM_PANTHOR_SET_USER_MMIO_OFFSET: Set the offset to use as the user MMIO offset.
+ *
+ * The default behavior is to pick the MMIO offset based on the size of the pgoff_t
+ * type seen by the process that manipulates the FD, such that a 32-bit process can
+ * always map the user MMIO ranges. But this approach doesn't work well for emulators
+ * like FEX, where the emulator is a 64-bit binary which might be executing 32-bit
+ * code. In that case, the kernel thinks it is a 64-bit process and assumes
+ * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT is in use, but the UMD library expects
+ * DRM_PANTHOR_USER_MMIO_OFFSET_32BIT, because it can't mmap() anything above the
+ * pgoff_t size.
+ */
+ DRM_PANTHOR_SET_USER_MMIO_OFFSET,
};
/**
@@ -296,6 +310,9 @@ struct drm_panthor_gpu_info {
/** @as_present: Bitmask encoding the number of address-space exposed by the MMU. */
__u32 as_present;
+ /** @pad0: MBZ. */
+ __u32 pad0;
+
/** @shader_present: Bitmask encoding the shader cores exposed by the GPU. */
__u64 shader_present;
@@ -999,6 +1016,28 @@ struct drm_panthor_bo_set_label {
};
/**
+ * struct drm_panthor_set_user_mmio_offset - Arguments passed to
+ * DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET
+ *
+ * This ioctl is only really useful if you want to support userspace
+ * CPU emulation environments where the size of an unsigned long differs
+ * between the host and the guest architectures.
+ */
+struct drm_panthor_set_user_mmio_offset {
+ /**
+ * @offset: User MMIO offset to use.
+ *
+ * Must be either DRM_PANTHOR_USER_MMIO_OFFSET_32BIT or
+ * DRM_PANTHOR_USER_MMIO_OFFSET_64BIT.
+ *
+ * Use DRM_PANTHOR_USER_MMIO_OFFSET (which selects OFFSET_32BIT or
+ * OFFSET_64BIT based on the size of an unsigned long) unless you
+ * have a very good reason to overrule this decision.
+ */
+ __u64 offset;
+};
+
+/**
* DRM_IOCTL_PANTHOR() - Build a Panthor IOCTL number
* @__access: Access type. Must be R, W or RW.
* @__id: One of the DRM_PANTHOR_xxx id.
@@ -1042,6 +1081,8 @@ enum {
DRM_IOCTL_PANTHOR(WR, TILER_HEAP_DESTROY, tiler_heap_destroy),
DRM_IOCTL_PANTHOR_BO_SET_LABEL =
DRM_IOCTL_PANTHOR(WR, BO_SET_LABEL, bo_set_label),
+ DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET =
+ DRM_IOCTL_PANTHOR(WR, SET_USER_MMIO_OFFSET, set_user_mmio_offset),
};
#if defined(__cplusplus)
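
For the FEX-style case described above, a hedged sketch that pins the user MMIO offset to the 32-bit value; DRM_PANTHOR_USER_MMIO_OFFSET_32BIT is pre-existing panthor UAPI referenced by the kernel-doc, not added here:

#include <sys/ioctl.h>
#include "drm/panthor_drm.h"

int panthor_force_32bit_mmio(int fd)
{
	struct drm_panthor_set_user_mmio_offset args = {
		.offset = DRM_PANTHOR_USER_MMIO_OFFSET_32BIT,
	};

	return ioctl(fd, DRM_IOCTL_PANTHOR_SET_USER_MMIO_OFFSET, &args);
}
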
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
index 6a702ba7817c..e2426413488f 100644
--- a/include/uapi/drm/xe_drm.h
+++ b/include/uapi/drm/xe_drm.h
@@ -925,9 +925,9 @@ struct drm_xe_gem_mmap_offset {
* - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running VM accepts
* exec submissions to its exec_queues that don't have an upper time
* limit on the job execution time. But exec submissions to these
- * don't allow any of the flags DRM_XE_SYNC_FLAG_SYNCOBJ,
- * DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ, DRM_XE_SYNC_FLAG_DMA_BUF,
- * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * don't allow any of the sync types DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ, used as out-syncobjs, that is,
+ * together with sync flag DRM_XE_SYNC_FLAG_SIGNAL.
* LR VMs can be created in recoverable page-fault mode using
* DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
* If that flag is omitted, the UMD can not rely on the slightly
@@ -1394,7 +1394,7 @@ struct drm_xe_sync {
/**
* @timeline_value: Input for the timeline sync object. Needs to be
- * different than 0 when used with %DRM_XE_SYNC_FLAG_TIMELINE_SYNCOBJ.
+ * different than 0 when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
*/
__u64 timeline_value;
@@ -1617,6 +1617,9 @@ enum drm_xe_oa_unit_type {
/** @DRM_XE_OA_UNIT_TYPE_OAM: OAM OA unit */
DRM_XE_OA_UNIT_TYPE_OAM,
+
+ /** @DRM_XE_OA_UNIT_TYPE_OAM_SAG: OAM_SAG OA unit */
+ DRM_XE_OA_UNIT_TYPE_OAM_SAG,
};
/**
@@ -1638,6 +1641,7 @@ struct drm_xe_oa_unit {
#define DRM_XE_OA_CAPS_SYNCS (1 << 1)
#define DRM_XE_OA_CAPS_OA_BUFFER_SIZE (1 << 2)
#define DRM_XE_OA_CAPS_WAIT_NUM_REPORTS (1 << 3)
+#define DRM_XE_OA_CAPS_OAM (1 << 4)
/** @oa_timestamp_freq: OA timestamp freq */
__u64 oa_timestamp_freq;
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 2e21b5594f81..ea5a0899ecf0 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -6,9 +6,10 @@
* Alexander Kjeldaas <astor@guardian.no>
* with help from Aleph1, Roland Buresund and Andrew Main.
*
- * See here for the libcap library ("POSIX draft" compliance):
+ * See here for the libcap2 library (compliant with Section 25 of
+ * the withdrawn POSIX 1003.1e Draft 17):
*
- * ftp://www.kernel.org/pub/linux/libs/security/linux-privs/kernel-2.6/
+ * https://www.kernel.org/pub/linux/libs/security/linux-privs/libcap2/
*/
#ifndef _UAPI_LINUX_CAPABILITY_H
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index f29b6c44655e..c218c89e0e2e 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -56,6 +56,7 @@ enum {
IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
IOMMUFD_CMD_VEVENTQ_ALLOC = 0x93,
+ IOMMUFD_CMD_HW_QUEUE_ALLOC = 0x94,
};
/**
@@ -591,16 +592,43 @@ struct iommu_hw_info_arm_smmuv3 {
};
/**
+ * struct iommu_hw_info_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Hardware
+ * Information (IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV)
+ *
+ * @flags: Must be 0
+ * @version: Version number for the CMDQ-V HW for PARAM bits[03:00]
+ * @log2vcmdqs: Log2 of the total number of VCMDQs for PARAM bits[07:04]
+ * @log2vsids: Log2 of the total number of SID replacements for PARAM bits[15:12]
+ * @__reserved: Must be 0
+ *
+ * VMM can use these fields directly in its emulated global PARAM register. Note
+ * that only one Virtual Interface (VINTF) should be exposed to a VM, i.e. PARAM
+ * bits[11:08] should be set to 0 for log2 of the total number of VINTFs.
+ */
+struct iommu_hw_info_tegra241_cmdqv {
+ __u32 flags;
+ __u8 version;
+ __u8 log2vcmdqs;
+ __u8 log2vsids;
+ __u8 __reserved;
+};
+
+/**
* enum iommu_hw_info_type - IOMMU Hardware Info Types
- * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
+ * @IOMMU_HW_INFO_TYPE_NONE: Output by the drivers that do not report hardware
* info
+ * @IOMMU_HW_INFO_TYPE_DEFAULT: Input to request a default type
* @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
* @IOMMU_HW_INFO_TYPE_ARM_SMMUV3: ARM SMMUv3 iommu info type
+ * @IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
+ * SMMUv3) info type
*/
enum iommu_hw_info_type {
IOMMU_HW_INFO_TYPE_NONE = 0,
+ IOMMU_HW_INFO_TYPE_DEFAULT = 0,
IOMMU_HW_INFO_TYPE_INTEL_VTD = 1,
IOMMU_HW_INFO_TYPE_ARM_SMMUV3 = 2,
+ IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV = 3,
};
/**
@@ -626,6 +654,15 @@ enum iommufd_hw_capabilities {
};
/**
+ * enum iommufd_hw_info_flags - Flags for iommu_hw_info
+ * @IOMMU_HW_INFO_FLAG_INPUT_TYPE: If set, @in_data_type carries an input type
+ * for user space to request for a specific info
+ */
+enum iommufd_hw_info_flags {
+ IOMMU_HW_INFO_FLAG_INPUT_TYPE = 1 << 0,
+};
+
+/**
* struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
* @size: sizeof(struct iommu_hw_info)
* @flags: Must be 0
@@ -634,6 +671,12 @@ enum iommufd_hw_capabilities {
* data that kernel supports
* @data_uptr: User pointer to a user-space buffer used by the kernel to fill
* the iommu type specific hardware information data
+ * @in_data_type: This shares the same field with @out_data_type, making it a
+ * bidirectional field. When IOMMU_HW_INFO_FLAG_INPUT_TYPE is
+ * set, the input type carried via this @in_data_type field is
+ * valid, requesting the info data for the given type. If
+ * IOMMU_HW_INFO_FLAG_INPUT_TYPE is unset, any input value will
+ * be treated as IOMMU_HW_INFO_TYPE_DEFAULT
* @out_data_type: Output the iommu hardware info type as defined in the enum
* iommu_hw_info_type.
* @out_capabilities: Output the generic iommu capability info type as defined
@@ -663,7 +706,10 @@ struct iommu_hw_info {
__u32 dev_id;
__u32 data_len;
__aligned_u64 data_uptr;
- __u32 out_data_type;
+ union {
+ __u32 in_data_type;
+ __u32 out_data_type;
+ };
__u8 out_max_pasid_log2;
__u8 __reserved[3];
__aligned_u64 out_capabilities;
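
A sketch of the new input-type path: with IOMMU_HW_INFO_FLAG_INPUT_TYPE set, @in_data_type selects the report. The fd and dev_id are illustrative, and the buffer here assumes the Tegra241 CMDQV layout added above:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

int get_cmdqv_info(int iommufd, __u32 dev_id,
		   struct iommu_hw_info_tegra241_cmdqv *out)
{
	struct iommu_hw_info cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.size = sizeof(cmd);
	cmd.flags = IOMMU_HW_INFO_FLAG_INPUT_TYPE;
	cmd.dev_id = dev_id;
	cmd.in_data_type = IOMMU_HW_INFO_TYPE_TEGRA241_CMDQV;
	cmd.data_len = sizeof(*out);
	cmd.data_uptr = (__u64)(uintptr_t)out;
	return ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd);
}
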
@@ -951,10 +997,29 @@ struct iommu_fault_alloc {
* enum iommu_viommu_type - Virtual IOMMU Type
* @IOMMU_VIOMMU_TYPE_DEFAULT: Reserved for future use
* @IOMMU_VIOMMU_TYPE_ARM_SMMUV3: ARM SMMUv3 driver specific type
+ * @IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
+ * SMMUv3) enabled ARM SMMUv3 type
*/
enum iommu_viommu_type {
IOMMU_VIOMMU_TYPE_DEFAULT = 0,
IOMMU_VIOMMU_TYPE_ARM_SMMUV3 = 1,
+ IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV = 2,
+};
+
+/**
+ * struct iommu_viommu_tegra241_cmdqv - NVIDIA Tegra241 CMDQV Virtual Interface
+ * (IOMMU_VIOMMU_TYPE_TEGRA241_CMDQV)
+ * @out_vintf_mmap_offset: mmap offset argument for VINTF's page0
+ * @out_vintf_mmap_length: mmap length argument for VINTF's page0
+ *
+ * Both @out_vintf_mmap_offset and @out_vintf_mmap_length are reported by kernel
+ * for user space to mmap the VINTF page0 from the host physical address space
+ * to the guest physical address space so that a guest kernel can directly R/W
+ * access to the VINTF page0 in order to control its virtual command queues.
+ */
+struct iommu_viommu_tegra241_cmdqv {
+ __aligned_u64 out_vintf_mmap_offset;
+ __aligned_u64 out_vintf_mmap_length;
};
/**
@@ -965,6 +1030,9 @@ enum iommu_viommu_type {
* @dev_id: The device's physical IOMMU will be used to back the virtual IOMMU
* @hwpt_id: ID of a nesting parent HWPT to associate to
* @out_viommu_id: Output virtual IOMMU ID for the allocated object
+ * @data_len: Length of the type specific data
+ * @__reserved: Must be 0
+ * @data_uptr: User pointer to a driver-specific virtual IOMMU data
*
* Allocate a virtual IOMMU object, representing the underlying physical IOMMU's
* virtualization support that is a security-isolated slice of the real IOMMU HW
@@ -985,6 +1053,9 @@ struct iommu_viommu_alloc {
__u32 dev_id;
__u32 hwpt_id;
__u32 out_viommu_id;
+ __u32 data_len;
+ __u32 __reserved;
+ __aligned_u64 data_uptr;
};
#define IOMMU_VIOMMU_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIOMMU_ALLOC)
@@ -995,10 +1066,15 @@ struct iommu_viommu_alloc {
* @dev_id: The physical device to allocate a virtual instance on the vIOMMU
* @out_vdevice_id: Object handle for the vDevice. Pass to IOMMU_DESTORY
* @virt_id: Virtual device ID per vIOMMU, e.g. vSID of ARM SMMUv3, vDeviceID
- * of AMD IOMMU, and vRID of a nested Intel VT-d to a Context Table
+ * of AMD IOMMU, and vRID of Intel VT-d
*
* Allocate a virtual device instance (for a physical device) against a vIOMMU.
* This instance holds the device's information (related to its vIOMMU) in a VM.
+ * Userspace should use IOMMU_DESTROY to destroy the virtual device before
+ * destroying the physical device (by closing the vfio_cdev fd). Otherwise the
+ * virtual device is forcibly destroyed on physical device destruction, and its
+ * vdevice_id is permanently leaked (unremovable and unreusable) until the
+ * iommufd is closed.
*/
struct iommu_vdevice_alloc {
__u32 size;
@@ -1075,10 +1151,12 @@ struct iommufd_vevent_header {
* enum iommu_veventq_type - Virtual Event Queue Type
* @IOMMU_VEVENTQ_TYPE_DEFAULT: Reserved for future use
* @IOMMU_VEVENTQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event Queue
+ * @IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV Extension IRQ
*/
enum iommu_veventq_type {
IOMMU_VEVENTQ_TYPE_DEFAULT = 0,
IOMMU_VEVENTQ_TYPE_ARM_SMMUV3 = 1,
+ IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV = 2,
};
/**
@@ -1103,6 +1181,19 @@ struct iommu_vevent_arm_smmuv3 {
};
/**
+ * struct iommu_vevent_tegra241_cmdqv - Tegra241 CMDQV IRQ
+ * (IOMMU_VEVENTQ_TYPE_TEGRA241_CMDQV)
+ * @lvcmdq_err_map: 128-bit logical vcmdq error map, little-endian.
+ * (Refer to the LVCMDQ_ERR_MAP registers per VINTF)
+ *
+ * The 128-bit register value from HW exclusively reflects the error bits for a
+ * Virtual Interface represented by a vIOMMU object. Read and report directly.
+ */
+struct iommu_vevent_tegra241_cmdqv {
+ __aligned_le64 lvcmdq_err_map[2];
+};
+
+/**
* struct iommu_veventq_alloc - ioctl(IOMMU_VEVENTQ_ALLOC)
* @size: sizeof(struct iommu_veventq_alloc)
* @flags: Must be 0
@@ -1141,4 +1232,61 @@ struct iommu_veventq_alloc {
__u32 __reserved;
};
#define IOMMU_VEVENTQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VEVENTQ_ALLOC)
+
+/**
+ * enum iommu_hw_queue_type - HW Queue Type
+ * @IOMMU_HW_QUEUE_TYPE_DEFAULT: Reserved for future use
+ * @IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV: NVIDIA Tegra241 CMDQV (extension for ARM
+ * SMMUv3) Virtual Command Queue (VCMDQ)
+ */
+enum iommu_hw_queue_type {
+ IOMMU_HW_QUEUE_TYPE_DEFAULT = 0,
+ /*
+ * TEGRA241_CMDQV requirements (otherwise, allocation will fail)
+ * - alloc starts from the lowest @index=0 in ascending order
+ * - destroy starts from the last allocated @index in descending order
+ * - @nesting_parent_iova must be aligned to @length in bytes and mapped in IOAS
+ * - @length must be a power of 2, with a minimum 32 bytes and a maximum
+ * 2 ^ idr[1].CMDQS * 16 bytes (use GET_HW_INFO call to read idr[1]
+ * from struct iommu_hw_info_arm_smmuv3)
+ * - it is suggested to back the queue memory with contiguous physical pages or
+ * a single huge page with alignment of the queue size, and limit the
+ * emulated vSMMU's IDR1.CMDQS to log2(huge page size / 16 bytes)
+ */
+ IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV = 1,
+};
+
+/**
+ * struct iommu_hw_queue_alloc - ioctl(IOMMU_HW_QUEUE_ALLOC)
+ * @size: sizeof(struct iommu_hw_queue_alloc)
+ * @flags: Must be 0
+ * @viommu_id: Virtual IOMMU ID to associate the HW queue with
+ * @type: One of enum iommu_hw_queue_type
+ * @index: The logical index to the HW queue per virtual IOMMU for a multi-queue
+ * model
+ * @out_hw_queue_id: The ID of the new HW queue
+ * @nesting_parent_iova: Base address of the queue memory in the guest physical
+ * address space
+ * @length: Length of the queue memory
+ *
+ * Allocate a HW queue object for a vIOMMU-specific HW-accelerated queue, which
+ * allows HW to access a guest queue memory described using @nesting_parent_iova
+ * and @length.
+ *
+ * A vIOMMU can allocate multiple queues, but it must use a different @index per
+ * type to separate each allocation, e.g::
+ *
+ * Type1 HW queue0, Type1 HW queue1, Type2 HW queue0, ...
+ */
+struct iommu_hw_queue_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 viommu_id;
+ __u32 type;
+ __u32 index;
+ __u32 out_hw_queue_id;
+ __aligned_u64 nesting_parent_iova;
+ __aligned_u64 length;
+};
+#define IOMMU_HW_QUEUE_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HW_QUEUE_ALLOC)
#endif
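
Following the TEGRA241_CMDQV rules listed above (index 0 first, power-of-2 length, IOAS-mapped base), a hedged allocation sketch; viommu_id, iova and len are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

int alloc_vcmdq0(int iommufd, __u32 viommu_id, __u64 iova, __u64 len)
{
	struct iommu_hw_queue_alloc cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.size = sizeof(cmd);
	cmd.viommu_id = viommu_id;
	cmd.type = IOMMU_HW_QUEUE_TYPE_TEGRA241_CMDQV;
	cmd.index = 0;			/* allocation must start from index 0 */
	cmd.nesting_parent_iova = iova;	/* aligned to len, mapped in the IOAS */
	cmd.length = len;		/* power of 2 within the documented bounds */
	if (ioctl(iommufd, IOMMU_HW_QUEUE_ALLOC, &cmd))
		return -1;
	return (int)cmd.out_hw_queue_id;
}
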
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 7a4c35ff03fe..f0f0d49d2544 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -644,6 +644,7 @@ struct kvm_ioeventfd {
#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+#define KVM_X86_DISABLE_EXITS_APERFMPERF (1 << 4)
/* for KVM_ENABLE_CAP */
struct kvm_enable_cap {
@@ -960,6 +961,7 @@ struct kvm_enable_cap {
#define KVM_CAP_ARM_EL2 240
#define KVM_CAP_ARM_EL2_E2H0 241
#define KVM_CAP_RISCV_MP_STATE_RESET 242
+#define KVM_CAP_ARM_CACHEABLE_PFNMAP_SUPPORTED 243
struct kvm_irq_routing_irqchip {
__u32 irqchip;
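
The new KVM_X86_DISABLE_EXITS_APERFMPERF bit slots into the existing KVM_CAP_X86_DISABLE_EXITS enable-cap scheme; a hedged sketch on an already-created, illustrative VM fd:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int disable_aperfmperf_exits(int vm_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));
	cap.cap = KVM_CAP_X86_DISABLE_EXITS;	/* pre-existing capability */
	cap.args[0] = KVM_X86_DISABLE_EXITS_APERFMPERF;
	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
}
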
diff --git a/include/uapi/linux/media/raspberrypi/pisp_be_config.h b/include/uapi/linux/media/raspberrypi/pisp_be_config.h
index cbeb714f4d61..2ad3b90684d7 100644
--- a/include/uapi/linux/media/raspberrypi/pisp_be_config.h
+++ b/include/uapi/linux/media/raspberrypi/pisp_be_config.h
@@ -21,10 +21,11 @@
/* preferred byte alignment for outputs */
#define PISP_BACK_END_OUTPUT_MAX_ALIGN 64u
-/* minimum allowed tile width anywhere in the pipeline */
-#define PISP_BACK_END_MIN_TILE_WIDTH 16u
-/* minimum allowed tile width anywhere in the pipeline */
-#define PISP_BACK_END_MIN_TILE_HEIGHT 16u
+/* minimum allowed tile sizes anywhere in the pipeline */
+#define PISP_BACK_END_MIN_TILE_WIDTH 16u
+#define PISP_BACK_END_MIN_TILE_HEIGHT 16u
+#define PISP_BACK_END_MAX_TILE_WIDTH 65536u
+#define PISP_BACK_END_MAX_TILE_HEIGHT 65536u
#define PISP_BACK_END_NUM_OUTPUTS 2
#define PISP_BACK_END_HOG_OUTPUT 1
diff --git a/include/uapi/linux/rkisp1-config.h b/include/uapi/linux/rkisp1-config.h
index 2d995f3c1ca3..3b060ea6eed7 100644
--- a/include/uapi/linux/rkisp1-config.h
+++ b/include/uapi/linux/rkisp1-config.h
@@ -170,6 +170,13 @@
#define RKISP1_CIF_ISP_COMPAND_NUM_POINTS 64
/*
+ * Wide Dynamic Range
+ */
+#define RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV 32
+#define RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF (RKISP1_CIF_ISP_WDR_CURVE_NUM_INTERV + 1)
+#define RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS 4
+
+/*
* Measurement types
*/
#define RKISP1_CIF_ISP_STAT_AWB (1U << 0)
@@ -889,6 +896,72 @@ struct rkisp1_cif_isp_compand_curve_config {
__u32 y[RKISP1_CIF_ISP_COMPAND_NUM_POINTS];
};
+/**
+ * struct rkisp1_cif_isp_wdr_tone_curve - Tone mapping curve definition for WDR.
+ *
+ * @dY: the dYn increments for horizontal (input) axis of the tone curve.
+ * each 3-bit dY value represents an increment of 2**(value+3).
+ * dY[0] bits 0:2 is increment dY1, bit 3 unused
+ * dY[0] bits 4:6 is increment dY2, bit 7 unused
+ * ...
+ * dY[0] bits 28:30 is increment dY8, bit 31 unused
+ * ... and so on until dY[3] bits 28:30 is increment dY32, bit 31 unused.
+ * @ym: the Ym values for the vertical (output) axis of the tone curve.
+ * each value is 13 bit.
+ */
+struct rkisp1_cif_isp_wdr_tone_curve {
+ __u32 dY[RKISP1_CIF_ISP_WDR_CURVE_NUM_DY_REGS];
+ __u16 ym[RKISP1_CIF_ISP_WDR_CURVE_NUM_COEFF];
+};
+
+/**
+ * struct rkisp1_cif_isp_wdr_iref_config - Illumination reference config for WDR.
+ *
+ * Use illumination reference value as described below, instead of only the
+ * luminance (Y) value for tone mapping and gain calculations:
+ * IRef = (rgb_factor * RGBMax_tr + (8 - rgb_factor) * Y)/8
+ *
+ * @rgb_factor: defines how much influence the RGBmax approach has in
+ * comparison to Y (valid values are 0..8).
+ * @use_y9_8: use Y*9/8 for maximum value calculation along with the
+ * default of R, G, B for noise reduction.
+ * @use_rgb7_8: decrease RGBMax by 7/8 for noise reduction.
+ * @disable_transient: disable transient calculation between Y and RGBY_max.
+ */
+struct rkisp1_cif_isp_wdr_iref_config {
+ __u8 rgb_factor;
+ __u8 use_y9_8;
+ __u8 use_rgb7_8;
+ __u8 disable_transient;
+};
+
+/**
+ * struct rkisp1_cif_isp_wdr_config - Configuration for wide dynamic range.
+ *
+ * @tone_curve: tone mapping curve.
+ * @iref_config: illumination reference configuration. (when use_iref is true)
+ * @rgb_offset: RGB offset value for RGB operation mode. (12 bits)
+ * @luma_offset: luminance offset value for RGB operation mode. (12 bits)
+ * @dmin_thresh: lower threshold for deltaMin value. (12 bits)
+ * @dmin_strength: strength factor for deltaMin. (valid range is 0x00..0x10)
+ * @use_rgb_colorspace: use RGB instead of luminance/chrominance colorspace.
+ * @bypass_chroma_mapping: disable chrominance mapping (only valid if
+ * use_rgb_colorspace = 0)
+ * @use_iref: use illumination reference instead of Y for tone mapping
+ * and gain calculations.
+ */
+struct rkisp1_cif_isp_wdr_config {
+ struct rkisp1_cif_isp_wdr_tone_curve tone_curve;
+ struct rkisp1_cif_isp_wdr_iref_config iref_config;
+ __u16 rgb_offset;
+ __u16 luma_offset;
+ __u16 dmin_thresh;
+ __u8 dmin_strength;
+ __u8 use_rgb_colorspace;
+ __u8 bypass_chroma_mapping;
+ __u8 use_iref;
+};
+
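To make the dY encoding above concrete: each register holds eight 3-bit codes in the low bits of successive nibbles, and code v stands for an x-axis step of 2^(v+3). A sketch of the packing, with the codes array as an illustrative input:

#include <linux/types.h>

/* Pack 32 3-bit increment codes into the four dY registers; code i lands
 * in dY[i / 8], bits (i % 8) * 4 .. (i % 8) * 4 + 2, nibble top bit unused. */
static void wdr_pack_dy(const __u8 codes[32], __u32 dY[4])
{
	int i;

	for (i = 0; i < 4; i++)
		dY[i] = 0;
	for (i = 0; i < 32; i++)
		dY[i / 8] |= (__u32)(codes[i] & 0x7) << ((i % 8) * 4);
}
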
/*---------- PART2: Measurement Statistics ------------*/
/**
@@ -1059,6 +1132,7 @@ struct rkisp1_stat_buffer {
* @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS: BLS in the compand block
* @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND: Companding expand curve
* @RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS: Companding compress curve
+ * @RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR: Wide dynamic range
*/
enum rkisp1_ext_params_block_type {
RKISP1_EXT_PARAMS_BLOCK_TYPE_BLS,
@@ -1081,11 +1155,15 @@ enum rkisp1_ext_params_block_type {
RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_BLS,
RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_EXPAND,
RKISP1_EXT_PARAMS_BLOCK_TYPE_COMPAND_COMPRESS,
+ RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR,
};
#define RKISP1_EXT_PARAMS_FL_BLOCK_DISABLE (1U << 0)
#define RKISP1_EXT_PARAMS_FL_BLOCK_ENABLE (1U << 1)
+/* A bitmask of parameters blocks supported on the current hardware. */
+#define RKISP1_CID_SUPPORTED_PARAMS_BLOCKS (V4L2_CID_USER_RKISP1_BASE + 0x01)
+
/**
* struct rkisp1_ext_params_block_header - RkISP1 extensible parameters block
* header
@@ -1460,6 +1538,23 @@ struct rkisp1_ext_params_compand_curve_config {
struct rkisp1_cif_isp_compand_curve_config config;
} __attribute__((aligned(8)));
+/**
+ * struct rkisp1_ext_params_wdr_config - RkISP1 extensible params
+ * Wide dynamic range config
+ *
+ * RkISP1 extensible parameters WDR block.
+ * Identified by :c:type:`RKISP1_EXT_PARAMS_BLOCK_TYPE_WDR`
+ *
+ * @header: The RkISP1 extensible parameters header, see
+ * :c:type:`rkisp1_ext_params_block_header`
+ * @config: WDR configuration, see
+ * :c:type:`rkisp1_cif_isp_wdr_config`
+ */
+struct rkisp1_ext_params_wdr_config {
+ struct rkisp1_ext_params_block_header header;
+ struct rkisp1_cif_isp_wdr_config config;
+} __attribute__((aligned(8)));
+
/*
* The rkisp1_ext_params_compand_curve_config structure is counted twice as it
* is used for both the COMPAND_EXPAND and COMPAND_COMPRESS block types.
@@ -1484,7 +1579,8 @@ struct rkisp1_ext_params_compand_curve_config {
sizeof(struct rkisp1_ext_params_afc_config) +\
sizeof(struct rkisp1_ext_params_compand_bls_config) +\
sizeof(struct rkisp1_ext_params_compand_curve_config) +\
- sizeof(struct rkisp1_ext_params_compand_curve_config))
+ sizeof(struct rkisp1_ext_params_compand_curve_config) +\
+ sizeof(struct rkisp1_ext_params_wdr_config))
/**
* enum rksip1_ext_param_buffer_version - RkISP1 extensible parameters version
@@ -1520,6 +1616,14 @@ enum rksip1_ext_param_buffer_version {
* V4L2 control. If such control is not available, userspace should assume only
* RKISP1_EXT_PARAM_BUFFER_V1 is supported by the driver.
*
+ * The read-only V4L2 control ``RKISP1_CID_SUPPORTED_PARAMS_BLOCKS`` can be used
+ * to query the blocks supported by the device. It contains a bitmask where each
+ * bit represents the availability of the corresponding entry from the
+ * :c:type:`rkisp1_ext_params_block_type` enum. The current and default values
+ * of the control represent the blocks supported by the device instance, while
+ * the maximum value represents the blocks supported by the kernel driver,
+ * independently of the device instance.
+ *
* For each ISP block that userspace wants to configure, a block-specific
* structure is appended to the @data buffer, one after the other without gaps
* in between nor overlaps. Userspace shall populate the @data_size field with
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 72e32814ea83..f836512e9deb 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -222,6 +222,12 @@ enum v4l2_colorfx {
*/
#define V4L2_CID_USER_UVC_BASE (V4L2_CID_USER_BASE + 0x11e0)
+/*
+ * The base for Rockchip ISP1 driver controls.
+ * We reserve 16 controls for this driver.
+ */
+#define V4L2_CID_USER_RKISP1_BASE (V4L2_CID_USER_BASE + 0x1220)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
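
With the control base reserved above, the rkisp1 driver can expose RKISP1_CID_SUPPORTED_PARAMS_BLOCKS (defined earlier in rkisp1-config.h). A hedged sketch reading the bitmask from the params node, whose fd is illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/rkisp1-config.h>

int rkisp1_supported_blocks(int fd, __s32 *blocks)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	memset(&ctrls, 0, sizeof(ctrls));
	ctrl.id = RKISP1_CID_SUPPORTED_PARAMS_BLOCKS;
	ctrls.count = 1;
	ctrls.controls = &ctrl;	/* .which == 0 selects current values */
	if (ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls))
		return -1;
	*blocks = ctrl.value;
	return 0;
}
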
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 9e3b366d5fc7..3dd9fa45dde1 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -726,7 +726,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12 v4l2_fourcc('G', 'B', '1', '2') /* 12 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG12 v4l2_fourcc('B', 'A', '1', '2') /* 12 GRGR.. BGBG.. */
#define V4L2_PIX_FMT_SRGGB12 v4l2_fourcc('R', 'G', '1', '2') /* 12 RGRG.. GBGB.. */
- /* 12bit raw bayer packed, 6 bytes for every 4 pixels */
+ /* 12bit raw bayer packed, 3 bytes for every 2 pixels */
#define V4L2_PIX_FMT_SBGGR12P v4l2_fourcc('p', 'B', 'C', 'C')
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
@@ -840,6 +840,12 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_PISP_COMP2_BGGR v4l2_fourcc('P', 'C', '2', 'B') /* PiSP 8-bit mode 2 compressed BGGR bayer */
#define V4L2_PIX_FMT_PISP_COMP2_MONO v4l2_fourcc('P', 'C', '2', 'M') /* PiSP 8-bit mode 2 compressed monochrome */
+/* Renesas RZ/V2H CRU packed formats. 64-bit units with contiguous pixels */
+#define V4L2_PIX_FMT_RAW_CRU10 v4l2_fourcc('C', 'R', '1', '0')
+#define V4L2_PIX_FMT_RAW_CRU12 v4l2_fourcc('C', 'R', '1', '2')
+#define V4L2_PIX_FMT_RAW_CRU14 v4l2_fourcc('C', 'R', '1', '4')
+#define V4L2_PIX_FMT_RAW_CRU20 v4l2_fourcc('C', 'R', '2', '0')
+
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
#define V4L2_SDR_FMT_CU16LE v4l2_fourcc('C', 'U', '1', '6') /* IQ u16le */
@@ -861,6 +867,7 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
+#define V4L2_META_FMT_UVC_MSXU_1_5 v4l2_fourcc('U', 'V', 'C', 'M') /* UVC MSXU metadata */
#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
/* Vendor specific - used for RK_ISP1 camera sub-system */
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 11b94b0b035b..98b71b9979f8 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-2-Clause) */
/*
- * Copyright 2018-2024 Amazon.com, Inc. or its affiliates. All rights reserved.
+ * Copyright 2018-2025 Amazon.com, Inc. or its affiliates. All rights reserved.
*/
#ifndef EFA_ABI_USER_H
@@ -131,6 +131,7 @@ enum {
EFA_QUERY_DEVICE_CAPS_DATA_POLLING_128 = 1 << 4,
EFA_QUERY_DEVICE_CAPS_RDMA_WRITE = 1 << 5,
EFA_QUERY_DEVICE_CAPS_UNSOLICITED_WRITE_RECV = 1 << 6,
+ EFA_QUERY_DEVICE_CAPS_CQ_WITH_EXT_MEM = 1 << 7,
};
struct efa_ibv_ex_query_device_resp {
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index ac7b162611ed..de6f5a94f1e3 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -55,6 +55,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_DM,
UVERBS_OBJECT_COUNTERS,
UVERBS_OBJECT_ASYNC_EVENT,
+ UVERBS_OBJECT_DMAH,
};
enum {
@@ -105,6 +106,10 @@ enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_FLAGS,
UVERBS_ATTR_CREATE_CQ_RESP_CQE,
UVERBS_ATTR_CREATE_CQ_EVENT_FD,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_VA,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_LENGTH,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_FD,
+ UVERBS_ATTR_CREATE_CQ_BUFFER_OFFSET,
};
enum uverbs_attrs_destroy_cq_cmd_attr_ids {
@@ -236,6 +241,22 @@ enum uverbs_methods_dm {
UVERBS_METHOD_DM_FREE,
};
+enum uverbs_attrs_alloc_dmah_cmd_attr_ids {
+ UVERBS_ATTR_ALLOC_DMAH_HANDLE,
+ UVERBS_ATTR_ALLOC_DMAH_CPU_ID,
+ UVERBS_ATTR_ALLOC_DMAH_TPH_MEM_TYPE,
+ UVERBS_ATTR_ALLOC_DMAH_PH,
+};
+
+enum uverbs_attrs_free_dmah_cmd_attr_ids {
+ UVERBS_ATTR_FREE_DMA_HANDLE,
+};
+
+enum uverbs_methods_dmah {
+ UVERBS_METHOD_DMAH_ALLOC,
+ UVERBS_METHOD_DMAH_FREE,
+};
+
enum uverbs_attrs_reg_dm_mr_cmd_attr_ids {
UVERBS_ATTR_REG_DM_MR_HANDLE,
UVERBS_ATTR_REG_DM_MR_OFFSET,
@@ -253,6 +274,7 @@ enum uverbs_methods_mr {
UVERBS_METHOD_ADVISE_MR,
UVERBS_METHOD_QUERY_MR,
UVERBS_METHOD_REG_DMABUF_MR,
+ UVERBS_METHOD_REG_MR,
};
enum uverbs_attrs_mr_destroy_ids {
@@ -286,6 +308,20 @@ enum uverbs_attrs_reg_dmabuf_mr_cmd_attr_ids {
UVERBS_ATTR_REG_DMABUF_MR_RESP_RKEY,
};
+enum uverbs_attrs_reg_mr_cmd_attr_ids {
+ UVERBS_ATTR_REG_MR_HANDLE,
+ UVERBS_ATTR_REG_MR_PD_HANDLE,
+ UVERBS_ATTR_REG_MR_DMA_HANDLE,
+ UVERBS_ATTR_REG_MR_IOVA,
+ UVERBS_ATTR_REG_MR_ADDR,
+ UVERBS_ATTR_REG_MR_LENGTH,
+ UVERBS_ATTR_REG_MR_ACCESS_FLAGS,
+ UVERBS_ATTR_REG_MR_FD,
+ UVERBS_ATTR_REG_MR_FD_OFFSET,
+ UVERBS_ATTR_REG_MR_RESP_LKEY,
+ UVERBS_ATTR_REG_MR_RESP_RKEY,
+};
+
enum uverbs_attrs_create_counters_cmd_attr_ids {
UVERBS_ATTR_CREATE_COUNTERS_HANDLE,
};
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index c0c59a8f7256..72fd385037a6 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -182,6 +182,11 @@ enum attr_idn {
QUERY_ATTR_IDN_CURR_WB_BUFF_SIZE = 0x1F,
QUERY_ATTR_IDN_TIMESTAMP = 0x30,
QUERY_ATTR_IDN_DEV_LVL_EXCEPTION_ID = 0x34,
+ QUERY_ATTR_IDN_HID_DEFRAG_OPERATION = 0x35,
+ QUERY_ATTR_IDN_HID_AVAILABLE_SIZE = 0x36,
+ QUERY_ATTR_IDN_HID_SIZE = 0x37,
+ QUERY_ATTR_IDN_HID_PROGRESS_RATIO = 0x38,
+ QUERY_ATTR_IDN_HID_STATE = 0x39,
QUERY_ATTR_IDN_WB_BUF_RESIZE_HINT = 0x3C,
QUERY_ATTR_IDN_WB_BUF_RESIZE_EN = 0x3D,
QUERY_ATTR_IDN_WB_BUF_RESIZE_STATUS = 0x3E,
@@ -401,6 +406,7 @@ enum {
UFS_DEV_HPB_SUPPORT = BIT(7),
UFS_DEV_WRITE_BOOSTER_SUP = BIT(8),
UFS_DEV_LVL_EXCEPTION_SUP = BIT(12),
+ UFS_DEV_HID_SUPPORT = BIT(13),
};
#define UFS_DEV_HPB_SUPPORT_VERSION 0x310
@@ -466,6 +472,24 @@ enum ufs_ref_clk_freq {
REF_CLK_FREQ_INVAL = -1,
};
+/* bDefragOperation attribute values */
+enum ufs_hid_defrag_operation {
+ HID_ANALYSIS_AND_DEFRAG_DISABLE = 0,
+ HID_ANALYSIS_ENABLE = 1,
+ HID_ANALYSIS_AND_DEFRAG_ENABLE = 2,
+};
+
+/* bHIDState attribute values */
+enum ufs_hid_state {
+ HID_IDLE = 0,
+ ANALYSIS_IN_PROGRESS = 1,
+ DEFRAG_REQUIRED = 2,
+ DEFRAG_IN_PROGRESS = 3,
+ DEFRAG_COMPLETED = 4,
+ DEFRAG_NOT_REQUIRED = 5,
+ NUM_UFS_HID_STATES = 6,
+};
+
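Kernel-side, the new HID attributes pair naturally with the existing query helpers. A hedged sketch polling bHIDState — ufshcd_query_attr()'s signature is assumed from ufshcd rather than shown in this diff:

#include <ufs/ufs.h>
#include <ufs/ufshcd.h>

static int ufs_read_hid_state(struct ufs_hba *hba, enum ufs_hid_state *state)
{
	u32 val;
	int err;

	err = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_READ_ATTR,
				QUERY_ATTR_IDN_HID_STATE, 0, 0, &val);
	if (err)
		return err;
	if (val >= NUM_UFS_HID_STATES)
		return -EINVAL;
	*state = val;
	return 0;
}
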
/* bWriteBoosterBufferResizeEn attribute */
enum wb_resize_en {
WB_RESIZE_EN_IDLE = 0,
@@ -625,6 +649,8 @@ struct ufs_dev_info {
u32 rtc_update_period;
u8 rtt_cap; /* bDeviceRTTCap */
+
+ bool hid_sup;
};
/*
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 9b3515cee711..1d3943777584 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -1480,6 +1480,7 @@ void ufshcd_resume_complete(struct device *dev);
bool ufshcd_is_hba_active(struct ufs_hba *hba);
void ufshcd_pm_qos_init(struct ufs_hba *hba);
void ufshcd_pm_qos_exit(struct ufs_hba *hba);
+int ufshcd_dme_rmw(struct ufs_hba *hba, u32 mask, u32 val, u32 attr);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
diff --git a/include/video/edid.h b/include/video/edid.h
index f614371e9116..c2b186b1933a 100644
--- a/include/video/edid.h
+++ b/include/video/edid.h
@@ -4,7 +4,8 @@
#include <uapi/video/edid.h>
-#ifdef CONFIG_X86
+#if defined(CONFIG_FIRMWARE_EDID)
extern struct edid_info edid_info;
#endif
+
#endif /* __linux_video_edid_h__ */
diff --git a/include/video/sisfb.h b/include/video/sisfb.h
index 76ff628a1220..54e6632cd4a2 100644
--- a/include/video/sisfb.h
+++ b/include/video/sisfb.h
@@ -15,10 +15,4 @@
#define SIS_300_VGA 1
#define SIS_315_VGA 2
-#define SISFB_HAVE_MALLOC_NEW
-extern void sis_malloc(struct sis_memreq *req);
-extern void sis_malloc_new(struct pci_dev *pdev, struct sis_memreq *req);
-
-extern void sis_free(u32 base);
-extern void sis_free_new(struct pci_dev *pdev, u32 base);
#endif
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index 47f11bec5e90..9e2a769b0d96 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -30,13 +30,11 @@ void xen_arch_suspend(void);
void xen_reboot(int reason);
void xen_resume_notifier_register(struct notifier_block *nb);
-void xen_resume_notifier_unregister(struct notifier_block *nb);
bool xen_vcpu_stolen(int vcpu);
void xen_setup_runstate_info(int cpu);
void xen_time_setup_guest(void);
void xen_manage_runstate_time(int action);
-void xen_get_runstate_snapshot(struct vcpu_runstate_info *res);
u64 xen_steal_clock(int cpu);
int xen_setup_shutdown_event(void);
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 3f90bdd387b6..1c23e6387f13 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -154,8 +154,6 @@ void *xenbus_read(struct xenbus_transaction t,
const char *dir, const char *node, unsigned int *len);
int xenbus_write(struct xenbus_transaction t,
const char *dir, const char *node, const char *string);
-int xenbus_mkdir(struct xenbus_transaction t,
- const char *dir, const char *node);
int xenbus_exists(struct xenbus_transaction t,
const char *dir, const char *node);
int xenbus_rm(struct xenbus_transaction t, const char *dir, const char *node);