Diffstat (limited to 'include')
-rw-r--r--include/asm-generic/cacheflush.h7
-rw-r--r--include/asm-generic/current.h2
-rw-r--r--include/asm-generic/hugetlb.h2
-rw-r--r--include/asm-generic/hyperv-tlfs.h1
-rw-r--r--include/asm-generic/ide_iops.h39
-rw-r--r--include/asm-generic/io.h31
-rw-r--r--include/asm-generic/iomap.h6
-rw-r--r--include/asm-generic/mshyperv.h17
-rw-r--r--include/asm-generic/pgalloc.h88
-rw-r--r--include/asm-generic/preempt.h14
-rw-r--r--include/asm-generic/tlb.h12
-rw-r--r--include/drm/bridge/dw_hdmi.h2
-rw-r--r--include/drm/drm_bridge.h3
-rw-r--r--include/drm/drm_crtc.h5
-rw-r--r--include/drm/drm_debugfs.h25
-rw-r--r--include/drm/drm_drv.h32
-rw-r--r--include/drm/drm_exec.h150
-rw-r--r--include/drm/drm_file.h8
-rw-r--r--include/drm/drm_gem.h82
-rw-r--r--include/drm/drm_gem_dma_helper.h14
-rw-r--r--include/drm/drm_gem_shmem_helper.h19
-rw-r--r--include/drm/drm_gem_vram_helper.h9
-rw-r--r--include/drm/drm_gpuva_mgr.h706
-rw-r--r--include/drm/drm_kunit_helpers.h11
-rw-r--r--include/drm/drm_modeset_helper_vtables.h48
-rw-r--r--include/drm/drm_panel.h96
-rw-r--r--include/drm/drm_plane.h2
-rw-r--r--include/drm/drm_prime.h7
-rw-r--r--include/drm/drm_syncobj.h6
-rw-r--r--include/drm/drm_sysfs.h4
-rw-r--r--include/drm/task_barrier.h4
-rw-r--r--include/drm/ttm/ttm_bo.h2
-rw-r--r--include/dt-bindings/arm/qcom,ids.h3
-rw-r--r--include/dt-bindings/ata/ahci.h2
-rw-r--r--include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h53
-rw-r--r--include/dt-bindings/clock/amlogic,a1-pll-clkc.h5
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h12
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h65
-rw-r--r--include/dt-bindings/clock/axg-clkc.h48
-rw-r--r--include/dt-bindings/clock/exynos3250.h18
-rw-r--r--include/dt-bindings/clock/exynos4.h5
-rw-r--r--include/dt-bindings/clock/exynos5250.h3
-rw-r--r--include/dt-bindings/clock/exynos5260-clk.h25
-rw-r--r--include/dt-bindings/clock/exynos5410.h2
-rw-r--r--include/dt-bindings/clock/exynos5420.h3
-rw-r--r--include/dt-bindings/clock/exynos5433.h42
-rw-r--r--include/dt-bindings/clock/exynos7885.h4
-rw-r--r--include/dt-bindings/clock/exynos850.h10
-rw-r--r--include/dt-bindings/clock/g12a-aoclkc.h7
-rw-r--r--include/dt-bindings/clock/g12a-clkc.h130
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h65
-rw-r--r--include/dt-bindings/clock/hi3559av100-clock.h2
-rw-r--r--include/dt-bindings/clock/imx8-clock.h28
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h2
-rw-r--r--include/dt-bindings/clock/imx93-clock.h3
-rw-r--r--include/dt-bindings/clock/intel,agilex5-clkmgr.h100
-rw-r--r--include/dt-bindings/clock/marvell,mmp2-audio.h1
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h1
-rw-r--r--include/dt-bindings/clock/marvell,pxa168.h1
-rw-r--r--include/dt-bindings/clock/marvell,pxa1928.h3
-rw-r--r--include/dt-bindings/clock/marvell,pxa910.h1
-rw-r--r--include/dt-bindings/clock/meson8b-clkc.h97
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq4019.h6
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq5018.h183
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8917.h1
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h3
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc8280xp.h10
-rw-r--r--include/dt-bindings/clock/qcom,ipq9574-gcc.h2
-rw-r--r--include/dt-bindings/clock/qcom,lcc-mdm9615.h44
-rw-r--r--include/dt-bindings/clock/qcom,qdu1000-gcc.h4
-rw-r--r--include/dt-bindings/clock/r8a779f0-cpg-mssr.h2
-rw-r--r--include/dt-bindings/clock/rockchip,rk3588-cru.h2
-rw-r--r--include/dt-bindings/clock/samsung,exynosautov9.h18
-rw-r--r--include/dt-bindings/clock/starfive,jh7110-crg.h80
-rw-r--r--include/dt-bindings/clock/stm32mp1-clks.h2
-rw-r--r--include/dt-bindings/clock/sun20i-d1-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun20i-d1-r-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-a100-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun50i-h616-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun6i-rtc.h2
-rw-r--r--include/dt-bindings/display/sdtv-standards.h2
-rw-r--r--include/dt-bindings/firmware/qcom,scm.h21
-rw-r--r--include/dt-bindings/gpio/amlogic-c3-gpio.h72
-rw-r--r--include/dt-bindings/gpio/meson-g12a-gpio.h2
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h88
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h124
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h50
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h22
-rw-r--r--include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h22
-rw-r--r--include/dt-bindings/interconnect/qcom,rpm-icc.h13
-rw-r--r--include/dt-bindings/interconnect/qcom,sm8250.h7
-rw-r--r--include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h126
-rw-r--r--include/dt-bindings/memory/mediatek,mt8188-memory-port.h489
-rw-r--r--include/dt-bindings/mux/ti-serdes.h8
-rw-r--r--include/dt-bindings/power/amlogic,c3-pwrc.h25
-rw-r--r--include/dt-bindings/power/meson-a1-power.h2
-rw-r--r--include/dt-bindings/power/meson-axg-power.h2
-rw-r--r--include/dt-bindings/power/meson-g12a-power.h2
-rw-r--r--include/dt-bindings/power/meson-gxbb-power.h2
-rw-r--r--include/dt-bindings/power/meson-s4-power.h2
-rw-r--r--include/dt-bindings/power/meson-sm1-power.h2
-rw-r--r--include/dt-bindings/power/meson8-power.h2
-rw-r--r--include/dt-bindings/power/qcom,rpmhpd.h30
-rw-r--r--include/dt-bindings/power/r8a779f0-sysc.h2
-rw-r--r--include/dt-bindings/power/rk3588-power.h2
-rw-r--r--include/dt-bindings/power/summit,smb347-charger.h2
-rw-r--r--include/dt-bindings/regulator/st,stm32mp13-regulator.h42
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-s10.h5
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq5018.h122
-rw-r--r--include/dt-bindings/reset/rockchip,rk3588-cru.h2
-rw-r--r--include/dt-bindings/reset/starfive,jh7110-crg.h60
-rw-r--r--include/dt-bindings/reset/stm32mp1-resets.h2
-rw-r--r--include/dt-bindings/reset/sun20i-d1-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun20i-d1-r-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-a100-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-a100-r-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h6-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h6-r-ccu.h2
-rw-r--r--include/dt-bindings/reset/sun50i-h616-ccu.h2
-rw-r--r--include/kunit/test.h1
-rw-r--r--include/kvm/arm_pmu.h4
-rw-r--r--include/linux/aer.h11
-rw-r--r--include/linux/amd-iommu.h1
-rw-r--r--include/linux/args.h28
-rw-r--r--include/linux/arm-smccc.h69
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/backing-dev.h1
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/blk-mq.h6
-rw-r--r--include/linux/blkdev.h2
-rw-r--r--include/linux/bpf.h4
-rw-r--r--include/linux/btf.h1
-rw-r--r--include/linux/btf_ids.h2
-rw-r--r--include/linux/buffer_head.h53
-rw-r--r--include/linux/cacheflush.h13
-rw-r--r--include/linux/ceph/ceph_fs.h68
-rw-r--r--include/linux/ceph/messenger.h40
-rw-r--r--include/linux/ceph/osd_client.h93
-rw-r--r--include/linux/ceph/rados.h4
-rw-r--r--include/linux/cfi.h4
-rw-r--r--include/linux/cgroup-defs.h14
-rw-r--r--include/linux/clk-provider.h2
-rw-r--r--include/linux/clk/mmp.h18
-rw-r--r--include/linux/console.h3
-rw-r--r--include/linux/coresight.h59
-rw-r--r--include/linux/counter.h2
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/crash_core.h27
-rw-r--r--include/linux/cred.h1
-rw-r--r--include/linux/damon.h28
-rw-r--r--include/linux/dax.h4
-rw-r--r--include/linux/dev_printk.h2
-rw-r--r--include/linux/device.h38
-rw-r--r--include/linux/dlm_plock.h2
-rw-r--r--include/linux/dma-map-ops.h12
-rw-r--r--include/linux/dma-mapping.h2
-rw-r--r--include/linux/dmar.h2
-rw-r--r--include/linux/dynamic_debug.h4
-rw-r--r--include/linux/efi.h3
-rw-r--r--include/linux/elf-fdpic.h14
-rw-r--r--include/linux/evm.h14
-rw-r--r--include/linux/export-internal.h2
-rw-r--r--include/linux/export.h4
-rw-r--r--include/linux/extcon.h12
-rw-r--r--include/linux/fb.h68
-rw-r--r--include/linux/firmware/imx/dsp.h6
-rw-r--r--include/linux/firmware/imx/sci.h16
-rw-r--r--include/linux/firmware/intel/stratix10-smc.h25
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h5
-rw-r--r--include/linux/firmware/mediatek/mtk-adsp-ipc.h6
-rw-r--r--include/linux/firmware/qcom/qcom_scm.h2
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h13
-rw-r--r--include/linux/frontswap.h91
-rw-r--r--include/linux/fs.h49
-rw-r--r--include/linux/fs_uart_pd.h71
-rw-r--r--include/linux/fsnotify_backend.h3
-rw-r--r--include/linux/ftrace.h5
-rw-r--r--include/linux/gameport.h2
-rw-r--r--include/linux/genl_magic_func.h27
-rw-r--r--include/linux/genl_magic_struct.h8
-rw-r--r--include/linux/greybus/svc.h3
-rw-r--r--include/linux/hid-roccat.h2
-rw-r--r--include/linux/hid.h26
-rw-r--r--include/linux/highmem.h44
-rw-r--r--include/linux/huge_mm.h6
-rw-r--r--include/linux/hugetlb.h38
-rw-r--r--include/linux/hw_breakpoint.h3
-rw-r--r--include/linux/hyperv.h6
-rw-r--r--include/linux/i2c-atr.h116
-rw-r--r--include/linux/i2c.h11
-rw-r--r--include/linux/if_team.h2
-rw-r--r--include/linux/iio/common/inv_sensors_timestamp.h95
-rw-r--r--include/linux/iio/consumer.h37
-rw-r--r--include/linux/iio/types.h2
-rw-r--r--include/linux/instruction_pointer.h5
-rw-r--r--include/linux/int_log.h (renamed from include/media/dvb_math.h)18
-rw-r--r--include/linux/intel_tpmi.h2
-rw-r--r--include/linux/interconnect-provider.h2
-rw-r--r--include/linux/io_uring.h6
-rw-r--r--include/linux/io_uring_types.h129
-rw-r--r--include/linux/iomap.h4
-rw-r--r--include/linux/iommu.h31
-rw-r--r--include/linux/iommufd.h9
-rw-r--r--include/linux/ioremap.h30
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/jbd2.h5
-rw-r--r--include/linux/jiffies.h197
-rw-r--r--include/linux/kasan.h2
-rw-r--r--include/linux/kernel.h37
-rw-r--r--include/linux/kernfs.h4
-rw-r--r--include/linux/kexec.h48
-rw-r--r--include/linux/kfence.h11
-rw-r--r--include/linux/kobject.h8
-rw-r--r--include/linux/ksm.h20
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/kvm_host.h53
-rw-r--r--include/linux/leds.h3
-rw-r--r--include/linux/libata.h44
-rw-r--r--include/linux/limits.h2
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lsm_hook_defs.h10
-rw-r--r--include/linux/lsm_hooks.h20
-rw-r--r--include/linux/maple_tree.h46
-rw-r--r--include/linux/math.h19
-rw-r--r--include/linux/memblock.h14
-rw-r--r--include/linux/memcontrol.h18
-rw-r--r--include/linux/memory-tiers.h4
-rw-r--r--include/linux/memory.h8
-rw-r--r--include/linux/memory_hotplug.h3
-rw-r--r--include/linux/mfd/88pm860x.h6
-rw-r--r--include/linux/mfd/abx500/ab8500.h4
-rw-r--r--include/linux/mfd/dbx500-prcmu.h21
-rw-r--r--include/linux/mfd/hi655x-pmic.h1
-rw-r--r--include/linux/mfd/max77686-private.h4
-rw-r--r--include/linux/mfd/rz-mtu3.h66
-rw-r--r--include/linux/mhi.h6
-rw-r--r--include/linux/micrel_phy.h7
-rw-r--r--include/linux/minmax.h91
-rw-r--r--include/linux/misc_cgroup.h28
-rw-r--r--include/linux/mm.h430
-rw-r--r--include/linux/mm_inline.h21
-rw-r--r--include/linux/mm_types.h135
-rw-r--r--include/linux/mm_types_task.h4
-rw-r--r--include/linux/mman.h4
-rw-r--r--include/linux/mmap_lock.h18
-rw-r--r--include/linux/mmu_notifier.h104
-rw-r--r--include/linux/mmzone.h1
-rw-r--r--include/linux/module_symbol.h4
-rw-r--r--include/linux/moduleloader.h5
-rw-r--r--include/linux/mtd/mtd.h2
-rw-r--r--include/linux/mtd/rawnand.h1
-rw-r--r--include/linux/net_mm.h17
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_page.h4
-rw-r--r--include/linux/nmi.h14
-rw-r--r--include/linux/nvmem-consumer.h4
-rw-r--r--include/linux/of.h36
-rw-r--r--include/linux/of_platform.h6
-rw-r--r--include/linux/oid_registry.h1
-rw-r--r--include/linux/page-flags.h90
-rw-r--r--include/linux/page_ext.h9
-rw-r--r--include/linux/page_idle.h5
-rw-r--r--include/linux/page_table_check.h71
-rw-r--r--include/linux/pagemap.h60
-rw-r--r--include/linux/pci.h48
-rw-r--r--include/linux/pci_ids.h110
-rw-r--r--include/linux/pds/pds_adminq.h375
-rw-r--r--include/linux/pds/pds_common.h9
-rw-r--r--include/linux/peci.h4
-rw-r--r--include/linux/percpu.h12
-rw-r--r--include/linux/percpu_counter.h41
-rw-r--r--include/linux/perf/riscv_pmu.h12
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/pgtable.h151
-rw-r--r--include/linux/phylink.h4
-rw-r--r--include/linux/pid_namespace.h39
-rw-r--r--include/linux/platform_data/bd6107.h2
-rw-r--r--include/linux/platform_data/gpio_backlight.h2
-rw-r--r--include/linux/platform_data/lv5207lp.h2
-rw-r--r--include/linux/platform_data/rtc-ds2404.h20
-rw-r--r--include/linux/platform_data/video-mx3fb.h50
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h19
-rw-r--r--include/linux/platform_data/x86/simatic-ipc-base.h5
-rw-r--r--include/linux/platform_data/x86/simatic-ipc.h6
-rw-r--r--include/linux/platform_device.h28
-rw-r--r--include/linux/pm.h9
-rw-r--r--include/linux/proc_fs.h1
-rw-r--r--include/linux/pwm.h6
-rw-r--r--include/linux/raid/pq.h4
-rw-r--r--include/linux/range.h8
-rw-r--r--include/linux/remoteproc.h4
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/rpmsg.h15
-rw-r--r--include/linux/rtc.h1
-rw-r--r--include/linux/sched.h14
-rw-r--r--include/linux/sched/signal.h7
-rw-r--r--include/linux/secretmem.h15
-rw-r--r--include/linux/security.h11
-rw-r--r--include/linux/sed-opal.h5
-rw-r--r--include/linux/serial_core.h18
-rw-r--r--include/linux/slab.h23
-rw-r--r--include/linux/soc/qcom/qcom_aoss.h4
-rw-r--r--include/linux/soc/qcom/smd-rpm.h20
-rw-r--r--include/linux/soc/qcom/smem.h1
-rw-r--r--include/linux/soundwire/sdw.h17
-rw-r--r--include/linux/soundwire/sdw_intel.h14
-rw-r--r--include/linux/sprintf.h27
-rw-r--r--include/linux/string_choices.h1
-rw-r--r--include/linux/string_helpers.h2
-rw-r--r--include/linux/sunrpc/cache.h12
-rw-r--r--include/linux/sunrpc/clnt.h2
-rw-r--r--include/linux/sunrpc/stats.h23
-rw-r--r--include/linux/sunrpc/svc.h52
-rw-r--r--include/linux/sunrpc/svc_xprt.h38
-rw-r--r--include/linux/sunrpc/svcauth.h53
-rw-r--r--include/linux/sunrpc/svcsock.h9
-rw-r--r--include/linux/sunrpc/xdr.h6
-rw-r--r--include/linux/sunrpc/xprt.h2
-rw-r--r--include/linux/swap.h21
-rw-r--r--include/linux/swapfile.h5
-rw-r--r--include/linux/swapops.h15
-rw-r--r--include/linux/swiotlb.h131
-rw-r--r--include/linux/switchtec.h1
-rw-r--r--include/linux/syscalls.h1
-rw-r--r--include/linux/sysctl.h31
-rw-r--r--include/linux/sysfb.h3
-rw-r--r--include/linux/sysrq.h18
-rw-r--r--include/linux/tca6416_keypad.h1
-rw-r--r--include/linux/thermal.h87
-rw-r--r--include/linux/ti_wilink_st.h2
-rw-r--r--include/linux/trace_events.h9
-rw-r--r--include/linux/tracefs.h23
-rw-r--r--include/linux/tty.h18
-rw-r--r--include/linux/tty_buffer.h20
-rw-r--r--include/linux/tty_driver.h9
-rw-r--r--include/linux/tty_flip.h70
-rw-r--r--include/linux/tty_ldisc.h67
-rw-r--r--include/linux/tty_port.h7
-rw-r--r--include/linux/usb.h12
-rw-r--r--include/linux/usb/ch9.h5
-rw-r--r--include/linux/usb/chipidea.h1
-rw-r--r--include/linux/usb/composite.h23
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/phy.h13
-rw-r--r--include/linux/usb/tcpci.h1
-rw-r--r--include/linux/usb/typec_altmode.h2
-rw-r--r--include/linux/userfaultfd_k.h4
-rw-r--r--include/linux/vdpa.h4
-rw-r--r--include/linux/vfio.h69
-rw-r--r--include/linux/vgaarb.h27
-rw-r--r--include/linux/virtio.h24
-rw-r--r--include/linux/workqueue.h115
-rw-r--r--include/linux/xarray.h18
-rw-r--r--include/linux/zswap.h37
-rw-r--r--include/media/cec.h11
-rw-r--r--include/media/davinci/vpif_types.h2
-rw-r--r--include/media/i2c/ds90ub9xx.h22
-rw-r--r--include/media/ipu-bridge.h181
-rw-r--r--include/media/ov_16bit_addr_reg_helpers.h92
-rw-r--r--include/media/v4l2-async.h238
-rw-r--r--include/media/v4l2-cci.h125
-rw-r--r--include/media/v4l2-fwnode.h70
-rw-r--r--include/media/v4l2-subdev.h21
-rw-r--r--include/net/bluetooth/hci_core.h2
-rw-r--r--include/net/ip.h3
-rw-r--r--include/net/ip6_fib.h5
-rw-r--r--include/net/ip_fib.h6
-rw-r--r--include/net/ip_tunnels.h15
-rw-r--r--include/net/ipv6.h9
-rw-r--r--include/net/neighbour.h2
-rw-r--r--include/net/net_namespace.h10
-rw-r--r--include/net/netfilter/nf_tables.h7
-rw-r--r--include/net/scm.h14
-rw-r--r--include/net/sock.h29
-rw-r--r--include/net/tcp.h1
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/rdma/iw_cm.h21
-rw-r--r--include/rv/da_monitor.h2
-rw-r--r--include/scsi/libsas.h32
-rw-r--r--include/scsi/scsi_host.h4
-rw-r--r--include/scsi/scsi_transport_iscsi.h1
-rw-r--r--include/soc/at91/atmel_tcb.h3
-rw-r--r--include/soc/imx/revision.h1
-rw-r--r--include/soc/mediatek/smi.h1
-rw-r--r--include/sound/compress_driver.h2
-rw-r--r--include/sound/control.h31
-rw-r--r--include/sound/core.h4
-rw-r--r--include/sound/cs35l41.h5
-rw-r--r--include/sound/cs35l56.h29
-rw-r--r--include/sound/cs42l43.h17
-rw-r--r--include/sound/designware_i2s.h3
-rw-r--r--include/sound/dmaengine_pcm.h2
-rw-r--r--include/sound/emu10k1.h20
-rw-r--r--include/sound/hda-mlink.h4
-rw-r--r--include/sound/hda_codec.h3
-rw-r--r--include/sound/hdaudio.h26
-rw-r--r--include/sound/hwdep.h2
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/l3.h28
-rw-r--r--include/sound/pcm.h15
-rw-r--r--include/sound/rawmidi.h2
-rw-r--r--include/sound/rt5665.h2
-rw-r--r--include/sound/rt5668.h3
-rw-r--r--include/sound/rt5682.h3
-rw-r--r--include/sound/rt5682s.h3
-rw-r--r--include/sound/simple_card_utils.h5
-rw-r--r--include/sound/soc-acpi.h7
-rw-r--r--include/sound/soc-component.h14
-rw-r--r--include/sound/soc-dai.h29
-rw-r--r--include/sound/soc-dapm.h139
-rw-r--r--include/sound/soc.h6
-rw-r--r--include/sound/sof/topology.h4
-rw-r--r--include/sound/uda134x.h24
-rw-r--r--include/sound/ump.h1
-rw-r--r--include/target/iscsi/iscsi_target_core.h4
-rw-r--r--include/trace/bpf_probe.h2
-rw-r--r--include/trace/events/block.h2
-rw-r--r--include/trace/events/dlm.h51
-rw-r--r--include/trace/events/fsi.h31
-rw-r--r--include/trace/events/fsi_master_i2cr.h107
-rw-r--r--include/trace/events/kyber.h8
-rw-r--r--include/trace/events/sunrpc.h80
-rw-r--r--include/trace/events/task.h2
-rw-r--r--include/trace/events/thp.h33
-rw-r--r--include/trace/events/wbt.h8
-rw-r--r--include/trace/events/xen.h12
-rw-r--r--include/uapi/asm-generic/siginfo.h3
-rw-r--r--include/uapi/drm/amdgpu_drm.h7
-rw-r--r--include/uapi/drm/drm.h84
-rw-r--r--include/uapi/drm/drm_mode.h7
-rw-r--r--include/uapi/drm/ivpu_accel.h9
-rw-r--r--include/uapi/drm/nouveau_drm.h271
-rw-r--r--include/uapi/drm/virtgpu_drm.h16
-rw-r--r--include/uapi/linux/bpf.h4
-rw-r--r--include/uapi/linux/cgroupstats.h2
-rw-r--r--include/uapi/linux/dlm_plock.h1
-rw-r--r--include/uapi/linux/elf-fdpic.h15
-rw-r--r--include/uapi/linux/elf.h4
-rw-r--r--include/uapi/linux/fsi.h10
-rw-r--r--include/uapi/linux/fuse.h60
-rw-r--r--include/uapi/linux/gsmmux.h118
-rw-r--r--include/uapi/linux/io_uring.h21
-rw-r--r--include/uapi/linux/iommufd.h97
-rw-r--r--include/uapi/linux/ioprio.h21
-rw-r--r--include/uapi/linux/kexec.h1
-rw-r--r--include/uapi/linux/kvm.h13
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h1
-rw-r--r--include/uapi/linux/rpmsg.h10
-rw-r--r--include/uapi/linux/sed-opal.h25
-rw-r--r--include/uapi/linux/serial_core.h44
-rw-r--r--include/uapi/linux/sync_file.h2
-rw-r--r--include/uapi/linux/ublk_cmd.h64
-rw-r--r--include/uapi/linux/usb/ch11.h6
-rw-r--r--include/uapi/linux/usb/ch9.h5
-rw-r--r--include/uapi/linux/userfaultfd.h25
-rw-r--r--include/uapi/linux/vfio.h150
-rw-r--r--include/uapi/linux/vhost_types.h4
-rw-r--r--include/uapi/linux/videodev2.h2
-rw-r--r--include/uapi/rdma/bnxt_re-abi.h7
-rw-r--r--include/uapi/rdma/irdma-abi.h9
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h52
-rw-r--r--include/uapi/sound/sof/tokens.h6
-rw-r--r--include/ufs/ufs.h90
-rw-r--r--include/ufs/ufs_quirks.h6
-rw-r--r--include/ufs/ufshcd.h60
-rw-r--r--include/ufs/ufshci.h53
-rw-r--r--include/ufs/unipro.h6
-rw-r--r--include/xen/arm/hypervisor.h12
-rw-r--r--include/xen/events.h3
-rw-r--r--include/xen/xen.h6
471 files changed, 9483 insertions, 3411 deletions
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index f46258d1a080..84ec53ccc450 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -77,13 +77,6 @@ static inline void flush_icache_range(unsigned long start, unsigned long end)
#define flush_icache_user_range flush_icache_range
#endif
-#ifndef flush_icache_page
-static inline void flush_icache_page(struct vm_area_struct *vma,
- struct page *page)
-{
-}
-#endif
-
#ifndef flush_icache_user_page
static inline void flush_icache_user_page(struct vm_area_struct *vma,
struct page *page,
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
index 3a2e224b9fa0..9c2aeecbd05a 100644
--- a/include/asm-generic/current.h
+++ b/include/asm-generic/current.h
@@ -2,9 +2,11 @@
#ifndef __ASM_GENERIC_CURRENT_H
#define __ASM_GENERIC_CURRENT_H
+#ifndef __ASSEMBLY__
#include <linux/thread_info.h>
#define get_current() (current_thread_info()->task)
#define current get_current()
+#endif
#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/hugetlb.h b/include/asm-generic/hugetlb.h
index d7f6335d3999..4da02798a00b 100644
--- a/include/asm-generic/hugetlb.h
+++ b/include/asm-generic/hugetlb.h
@@ -22,7 +22,7 @@ static inline unsigned long huge_pte_dirty(pte_t pte)
static inline pte_t huge_pte_mkwrite(pte_t pte)
{
- return pte_mkwrite(pte);
+ return pte_mkwrite_novma(pte);
}
#ifndef __HAVE_ARCH_HUGE_PTE_WRPROTECT
diff --git a/include/asm-generic/hyperv-tlfs.h b/include/asm-generic/hyperv-tlfs.h
index f4e4cc4f965f..fdac4a1714ec 100644
--- a/include/asm-generic/hyperv-tlfs.h
+++ b/include/asm-generic/hyperv-tlfs.h
@@ -223,6 +223,7 @@ enum HV_GENERIC_SET_FORMAT {
#define HV_STATUS_INVALID_PORT_ID 17
#define HV_STATUS_INVALID_CONNECTION_ID 18
#define HV_STATUS_INSUFFICIENT_BUFFERS 19
+#define HV_STATUS_TIME_OUT 120
#define HV_STATUS_VTL_ALREADY_ENABLED 134
/*
diff --git a/include/asm-generic/ide_iops.h b/include/asm-generic/ide_iops.h
deleted file mode 100644
index 81dfa3ee5e06..000000000000
--- a/include/asm-generic/ide_iops.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/* Generic I/O and MEMIO string operations. */
-
-#define __ide_insw insw
-#define __ide_insl insl
-#define __ide_outsw outsw
-#define __ide_outsl outsl
-
-static __inline__ void __ide_mm_insw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u16 *)addr = readw(port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_insl(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- *(u32 *)addr = readl(port);
- addr += 4;
- }
-}
-
-static __inline__ void __ide_mm_outsw(void __iomem *port, void *addr, u32 count)
-{
- while (count--) {
- writew(*(u16 *)addr, port);
- addr += 2;
- }
-}
-
-static __inline__ void __ide_mm_outsl(void __iomem * port, void *addr, u32 count)
-{
- while (count--) {
- writel(*(u32 *)addr, port);
- addr += 4;
- }
-}
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 587e7e9b9a37..bac63e874c7b 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -1047,41 +1047,22 @@ static inline void iounmap(volatile void __iomem *addr)
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>
-/*
- * Arch code can implement the following two hooks when using GENERIC_IOREMAP
- * ioremap_allowed() return a bool,
- * - true means continue to remap
- * - false means skip remap and return directly
- * iounmap_allowed() return a bool,
- * - true means continue to vunmap
- * - false means skip vunmap and return directly
- */
-#ifndef ioremap_allowed
-#define ioremap_allowed ioremap_allowed
-static inline bool ioremap_allowed(phys_addr_t phys_addr, size_t size,
- unsigned long prot)
-{
- return true;
-}
-#endif
-
-#ifndef iounmap_allowed
-#define iounmap_allowed iounmap_allowed
-static inline bool iounmap_allowed(void *addr)
-{
- return true;
-}
-#endif
+void __iomem *generic_ioremap_prot(phys_addr_t phys_addr, size_t size,
+ pgprot_t prot);
void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
unsigned long prot);
void iounmap(volatile void __iomem *addr);
+void generic_iounmap(volatile void __iomem *addr);
+#ifndef ioremap
+#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
/* _PAGE_IOREMAP needs to be supplied by the architecture */
return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
+#endif
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
#ifndef ioremap_wc
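
As an illustration (not part of the patch), an architecture selecting CONFIG_GENERIC_IOREMAP would now provide its own ioremap_prot()/iounmap() and fall back to the new generic helpers, instead of implementing the removed ioremap_allowed()/iounmap_allowed() hooks. A minimal sketch; the pfn_is_map_memory() filter is an arch-specific placeholder:

void __iomem *ioremap_prot(phys_addr_t phys_addr, size_t size,
			   unsigned long prot)
{
	/* Arch-specific policy, e.g. refuse to remap normal RAM. */
	if (pfn_is_map_memory(__phys_to_pfn(phys_addr)))
		return NULL;

	return generic_ioremap_prot(phys_addr, size, __pgprot(prot));
}

void iounmap(volatile void __iomem *addr)
{
	generic_iounmap(addr);
}
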
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index 08237ae8b840..196087a8126e 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -93,15 +93,15 @@ extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *);
#endif
-#ifndef ARCH_HAS_IOREMAP_WC
+#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_WT
+#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif
-#ifndef ARCH_HAS_IOREMAP_NP
+#ifndef ioremap_np
/* See the comment in asm-generic/io.h about ioremap_np(). */
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index a8f4b653ef4e..cecd2b7bd033 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -36,18 +36,25 @@ struct ms_hyperv_info {
u32 nested_features;
u32 max_vp_index;
u32 max_lp_index;
- u32 isolation_config_a;
+ union {
+ u32 isolation_config_a;
+ struct {
+ u32 paravisor_present : 1;
+ u32 reserved_a1 : 31;
+ };
+ };
union {
u32 isolation_config_b;
struct {
u32 cvm_type : 4;
- u32 reserved1 : 1;
+ u32 reserved_b1 : 1;
u32 shared_gpa_boundary_active : 1;
u32 shared_gpa_boundary_bits : 6;
- u32 reserved2 : 20;
+ u32 reserved_b2 : 20;
};
};
u64 shared_gpa_boundary;
+ u8 vtl;
};
extern struct ms_hyperv_info ms_hyperv;
extern bool hv_nested;
@@ -57,7 +64,8 @@ extern void * __percpu *hyperv_pcpu_output_arg;
extern u64 hv_do_hypercall(u64 control, void *inputaddr, void *outputaddr);
extern u64 hv_do_fast_hypercall8(u16 control, u64 input8);
-extern bool hv_isolation_type_snp(void);
+bool hv_isolation_type_snp(void);
+bool hv_isolation_type_tdx(void);
/* Helper functions that provide a consistent pattern for checking Hyper-V hypercall status. */
static inline int hv_result(u64 status)
@@ -274,6 +282,7 @@ enum hv_isolation_type hv_get_isolation_type(void);
bool hv_is_isolation_supported(void);
bool hv_isolation_type_snp(void);
u64 hv_ghcb_hypercall(u64 control, void *input, void *output, u32 input_size);
+u64 hv_tdx_hypercall(u64 control, u64 param1, u64 param2);
void hyperv_cleanup(void);
bool hv_query_ext_cap(u64 cap_query);
void hv_setup_dma_ops(struct device *dev, bool coherent);
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
index a7cf825befae..c75d4a753849 100644
--- a/include/asm-generic/pgalloc.h
+++ b/include/asm-generic/pgalloc.h
@@ -8,7 +8,7 @@
#define GFP_PGTABLE_USER (GFP_PGTABLE_KERNEL | __GFP_ACCOUNT)
/**
- * __pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * __pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* This function is intended for architectures that need
@@ -18,12 +18,17 @@
*/
static inline pte_t *__pte_alloc_one_kernel(struct mm_struct *mm)
{
- return (pte_t *)__get_free_page(GFP_PGTABLE_KERNEL);
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_PGTABLE_KERNEL &
+ ~__GFP_HIGHMEM, 0);
+
+ if (!ptdesc)
+ return NULL;
+ return ptdesc_address(ptdesc);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE_KERNEL
/**
- * pte_alloc_one_kernel - allocate a page for PTE-level kernel page table
+ * pte_alloc_one_kernel - allocate memory for a PTE-level kernel page table
* @mm: the mm_struct of the current context
*
* Return: pointer to the allocated memory or %NULL on error
@@ -35,40 +40,40 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
#endif
/**
- * pte_free_kernel - free PTE-level kernel page table page
+ * pte_free_kernel - free PTE-level kernel page table memory
* @mm: the mm_struct of the current context
* @pte: pointer to the memory containing the page table
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
- free_page((unsigned long)pte);
+ pagetable_free(virt_to_ptdesc(pte));
}
/**
- * __pte_alloc_one - allocate a page for PTE-level user page table
+ * __pte_alloc_one - allocate memory for a PTE-level user page table
* @mm: the mm_struct of the current context
* @gfp: GFP flags to use for the allocation
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocates memory for a page table and ptdesc, and runs pagetable_pte_ctor().
*
* This function is intended for architectures that need
* anything beyond simple page allocation or must have custom GFP flags.
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
{
- struct page *pte;
+ struct ptdesc *ptdesc;
- pte = alloc_page(gfp);
- if (!pte)
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pte_page_ctor(pte)) {
- __free_page(pte);
+ if (!pagetable_pte_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return pte;
+ return ptdesc_page(ptdesc);
}
#ifndef __HAVE_ARCH_PTE_ALLOC_ONE
@@ -76,9 +81,9 @@ static inline pgtable_t __pte_alloc_one(struct mm_struct *mm, gfp_t gfp)
* pte_alloc_one - allocate a page for PTE-level user page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pte_page_ctor().
+ * Allocates memory for a page table and ptdesc, and runs pagetable_pte_ctor().
*
- * Return: `struct page` initialized as page table or %NULL on error
+ * Return: `struct page` referencing the ptdesc or %NULL on error
*/
static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
{
@@ -92,14 +97,16 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
*/
/**
- * pte_free - free PTE-level user page table page
+ * pte_free - free PTE-level user page table memory
* @mm: the mm_struct of the current context
- * @pte_page: the `struct page` representing the page table
+ * @pte_page: the `struct page` referencing the ptdesc
*/
static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
{
- pgtable_pte_page_dtor(pte_page);
- __free_page(pte_page);
+ struct ptdesc *ptdesc = page_ptdesc(pte_page);
+
+ pagetable_pte_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
@@ -107,10 +114,11 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
#ifndef __HAVE_ARCH_PMD_ALLOC_ONE
/**
- * pmd_alloc_one - allocate a page for PMD-level page table
+ * pmd_alloc_one - allocate memory for a PMD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page and runs the pgtable_pmd_page_ctor().
+ * Allocates memory for a page table and ptdesc, and runs pagetable_pmd_ctor().
+ *
* Allocations use %GFP_PGTABLE_USER in user context and
* %GFP_PGTABLE_KERNEL in kernel context.
*
@@ -118,28 +126,30 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte_page)
*/
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
- struct page *page;
+ struct ptdesc *ptdesc;
gfp_t gfp = GFP_PGTABLE_USER;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- page = alloc_page(gfp);
- if (!page)
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
return NULL;
- if (!pgtable_pmd_page_ctor(page)) {
- __free_page(page);
+ if (!pagetable_pmd_ctor(ptdesc)) {
+ pagetable_free(ptdesc);
return NULL;
}
- return (pmd_t *)page_address(page);
+ return ptdesc_address(ptdesc);
}
#endif
#ifndef __HAVE_ARCH_PMD_FREE
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
- pgtable_pmd_page_dtor(virt_to_page(pmd));
- free_page((unsigned long)pmd);
+ pagetable_pmd_dtor(ptdesc);
+ pagetable_free(ptdesc);
}
#endif
@@ -150,19 +160,25 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
static inline pud_t *__pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
gfp_t gfp = GFP_PGTABLE_USER;
+ struct ptdesc *ptdesc;
if (mm == &init_mm)
gfp = GFP_PGTABLE_KERNEL;
- return (pud_t *)get_zeroed_page(gfp);
+ gfp &= ~__GFP_HIGHMEM;
+
+ ptdesc = pagetable_alloc(gfp, 0);
+ if (!ptdesc)
+ return NULL;
+ return ptdesc_address(ptdesc);
}
#ifndef __HAVE_ARCH_PUD_ALLOC_ONE
/**
- * pud_alloc_one - allocate a page for PUD-level page table
+ * pud_alloc_one - allocate memory for a PUD-level page table
* @mm: the mm_struct of the current context
*
- * Allocates a page using %GFP_PGTABLE_USER for user context and
- * %GFP_PGTABLE_KERNEL for kernel context.
+ * Allocate memory for a page table using %GFP_PGTABLE_USER for user context
+ * and %GFP_PGTABLE_KERNEL for kernel context.
*
* Return: pointer to the allocated memory or %NULL on error
*/
@@ -175,7 +191,7 @@ static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
static inline void __pud_free(struct mm_struct *mm, pud_t *pud)
{
BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
- free_page((unsigned long)pud);
+ pagetable_free(virt_to_ptdesc(pud));
}
#ifndef __HAVE_ARCH_PUD_FREE
@@ -190,7 +206,7 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
#ifndef __HAVE_ARCH_PGD_FREE
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
- free_page((unsigned long)pgd);
+ pagetable_free(virt_to_ptdesc(pgd));
}
#endif
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index b4d43a4af5f7..51f8f3881523 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -80,9 +80,21 @@ static __always_inline bool should_resched(int preempt_offset)
#ifdef CONFIG_PREEMPTION
extern asmlinkage void preempt_schedule(void);
-#define __preempt_schedule() preempt_schedule()
extern asmlinkage void preempt_schedule_notrace(void);
+
+#if defined(CONFIG_PREEMPT_DYNAMIC) && defined(CONFIG_HAVE_PREEMPT_DYNAMIC_KEY)
+
+void dynamic_preempt_schedule(void);
+void dynamic_preempt_schedule_notrace(void);
+#define __preempt_schedule() dynamic_preempt_schedule()
+#define __preempt_schedule_notrace() dynamic_preempt_schedule_notrace()
+
+#else /* !CONFIG_PREEMPT_DYNAMIC || !CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
+
+#define __preempt_schedule() preempt_schedule()
#define __preempt_schedule_notrace() preempt_schedule_notrace()
+
+#endif /* CONFIG_PREEMPT_DYNAMIC && CONFIG_HAVE_PREEMPT_DYNAMIC_KEY */
#endif /* CONFIG_PREEMPTION */
#endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index b46617207c93..129a3a759976 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -456,7 +456,6 @@ static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
return;
tlb_flush(tlb);
- mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
__tlb_reset_range(tlb);
}
@@ -481,6 +480,17 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}
+static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, void *pt)
+{
+ tlb_remove_table(tlb, pt);
+}
+
+/* Like tlb_remove_ptdesc, but for page-like page directories. */
+static inline void tlb_remove_page_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
+{
+ tlb_remove_page(tlb, ptdesc_page(pt));
+}
+
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index f668e75fbabe..6a46baa0737c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -206,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
bool force, bool disabled, bool rxsense);
void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index bf964cdfb330..c339fc85fd07 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -36,6 +36,7 @@ struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
struct drm_display_info;
+struct drm_minor;
struct drm_panel;
struct edid;
struct i2c_adapter;
@@ -949,4 +950,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void drm_bridge_debugfs_init(struct drm_minor *minor);
+
#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8e1cbc75143e..8b48a1974da3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,11 +77,6 @@ struct drm_plane_helper_funcs;
* intended to indicate whether a full modeset is needed, rather than strictly
* describing what has changed in a commit. See also:
* drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
*/
struct drm_crtc_state {
/** @crtc: backpointer to the CRTC */
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 7616f457ce70..cb2c1956a214 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -34,6 +34,22 @@
#include <linux/types.h>
#include <linux/seq_file.h>
+
+#include <drm/drm_gpuva_mgr.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
+ */
+#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data}
+
/**
* struct drm_info_list - debugfs info list entry
*
@@ -134,6 +150,9 @@ void drm_debugfs_add_file(struct drm_device *dev, const char *name,
void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files, int count);
+
+int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuva_manager *mgr);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -155,6 +174,12 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files,
int count)
{}
+
+static inline int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuva_manager *mgr)
+{
+ return 0;
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
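
As a usage illustration (not part of the patch), a driver could wire the new helper up roughly as follows; the "foo" names are hypothetical, and the show callback follows the usual &drm_info_list convention where m->private carries the &struct drm_info_node:

static int foo_debugfs_gpuvas(struct seq_file *m, void *data)
{
	struct drm_info_node *node = m->private;
	struct foo_device *foo = to_foo_device(node->minor->dev);

	/* Dump regions and mappings of this driver's single VA space. */
	return drm_debugfs_gpuva_info(m, &foo->gpuva_mgr);
}

static const struct drm_info_list foo_debugfs_list[] = {
	DRM_DEBUGFS_GPUVA_INFO(foo_debugfs_gpuvas, NULL),
};

The list would then be registered with drm_debugfs_create_files() as usual.
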
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 89e2706cac56..9813fa759b75 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -104,6 +104,12 @@ enum drm_driver_feature {
* acceleration should be handled by two drivers that are connected using auxiliary bus.
*/
DRIVER_COMPUTE_ACCEL = BIT(7),
+ /**
+ * @DRIVER_GEM_GPUVA:
+ *
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -304,22 +310,14 @@ struct drm_driver {
/**
* @prime_handle_to_fd:
*
- * Main PRIME export function. Should be implemented with
- * drm_gem_prime_handle_to_fd() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME export function. Only used by vmwgfx.
*/
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/**
* @prime_fd_to_handle:
*
- * Main PRIME import function. Should be implemented with
- * drm_gem_prime_fd_to_handle() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME import function. Only used by vmwgfx.
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
@@ -343,20 +341,6 @@ struct drm_driver {
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
- /**
- * @gem_prime_mmap:
- *
- * mmap hook for GEM drivers, used to implement dma-buf mmap in the
- * PRIME helpers.
- *
- * This hook only exists for historical reasons. Drivers must use
- * drm_gem_prime_mmap() to implement it.
- *
- * FIXME: Convert all drivers to implement mmap in struct
- * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
- * its callers. This hook should be removed afterwards.
- */
- int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
* @dumb_create:
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
new file mode 100644
index 000000000000..b5bf0b6da791
--- /dev/null
+++ b/include/drm/drm_exec.h
@@ -0,0 +1,150 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef __DRM_EXEC_H__
+#define __DRM_EXEC_H__
+
+#include <linux/compiler.h>
+#include <linux/ww_mutex.h>
+
+#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
+#define DRM_EXEC_IGNORE_DUPLICATES BIT(1)
+
+struct drm_gem_object;
+
+/**
+ * struct drm_exec - Execution context
+ */
+struct drm_exec {
+ /**
+ * @flags: Flags to control locking behavior
+ */
+ uint32_t flags;
+
+ /**
+ * @ticket: WW ticket used for acquiring locks
+ */
+ struct ww_acquire_ctx ticket;
+
+ /**
+ * @num_objects: number of objects locked
+ */
+ unsigned int num_objects;
+
+ /**
+ * @max_objects: maximum objects in array
+ */
+ unsigned int max_objects;
+
+ /**
+ * @objects: array of the locked objects
+ */
+ struct drm_gem_object **objects;
+
+ /**
+ * @contended: contended GEM object we backed off for
+ */
+ struct drm_gem_object *contended;
+
+ /**
+ * @prelocked: already locked GEM object due to contention
+ */
+ struct drm_gem_object *prelocked;
+};
+
+/**
+ * drm_exec_obj() - Return the object for a given drm_exec index
+ * @exec: Pointer to the drm_exec context
+ * @index: The index.
+ *
+ * Return: Pointer to the locked object corresponding to @index if
+ * index is within the number of locked objects. NULL otherwise.
+ */
+static inline struct drm_gem_object *
+drm_exec_obj(struct drm_exec *exec, unsigned long index)
+{
+ return index < exec->num_objects ? exec->objects[index] : NULL;
+}
+
+/**
+ * drm_exec_for_each_locked_object - iterate over all the locked objects
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object.
+ */
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for ((index) = 0; ((obj) = drm_exec_obj(exec, index)); ++(index))
+
+/**
+ * drm_exec_for_each_locked_object_reverse - iterate over all the locked
+ * objects in reverse locking order
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object in
+ * reverse locking order. Note that @index may go below zero and wrap,
+ * but that will be caught by drm_exec_obj(), returning a NULL object.
+ */
+#define drm_exec_for_each_locked_object_reverse(exec, index, obj) \
+ for ((index) = (exec)->num_objects - 1; \
+ ((obj) = drm_exec_obj(exec, index)); --(index))
+
+/**
+ * drm_exec_until_all_locked - loop until all GEM objects are locked
+ * @exec: drm_exec object
+ *
+ * Core functionality of the drm_exec object. Loops until all GEM objects are
+ * locked and no more contention exists. At the beginning of the loop it is
+ * guaranteed that no GEM object is locked.
+ *
+ * Since labels can't be defined local to the loop's body, we use a jump pointer
+ * to make sure that the retry is only used from within the loop's body.
+ */
+#define drm_exec_until_all_locked(exec) \
+__PASTE(__drm_exec_, __LINE__): \
+ for (void *__drm_exec_retry_ptr; ({ \
+ __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+ (void)__drm_exec_retry_ptr; \
+ drm_exec_cleanup(exec); \
+ });)
+
+/**
+ * drm_exec_retry_on_contention - restart the loop to grab all locks
+ * @exec: drm_exec object
+ *
+ * Control flow helper to continue when a contention was detected and we need to
+ * clean up and re-start the loop to prepare all GEM objects.
+ */
+#define drm_exec_retry_on_contention(exec) \
+ do { \
+ if (unlikely(drm_exec_is_contended(exec))) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
+
+/**
+ * drm_exec_is_contended - check for contention
+ * @exec: drm_exec object
+ *
+ * Returns true if the drm_exec object has run into some contention while
+ * locking a GEM object and needs to clean up.
+ */
+static inline bool drm_exec_is_contended(struct drm_exec *exec)
+{
+ return !!exec->contended;
+}
+
+void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_fini(struct drm_exec *exec);
+bool drm_exec_cleanup(struct drm_exec *exec);
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences);
+int drm_exec_prepare_array(struct drm_exec *exec,
+ struct drm_gem_object **objects,
+ unsigned int num_objects,
+ unsigned int num_fences);
+
+#endif
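
A minimal usage sketch of the API above (boA, boB and fence are driver-side placeholders): lock a set of GEM objects, backing off and retrying on contention, then add a fence to every object that ended up locked.

	struct drm_exec exec;
	struct drm_gem_object *obj;
	unsigned long index;
	int ret;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
	drm_exec_until_all_locked(&exec) {
		/* On contention everything is unlocked and the loop restarts. */
		ret = drm_exec_prepare_obj(&exec, boA, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto error;

		ret = drm_exec_prepare_obj(&exec, boB, 1);
		drm_exec_retry_on_contention(&exec);
		if (ret)
			goto error;
	}

	drm_exec_for_each_locked_object(&exec, index, obj)
		dma_resv_add_fence(obj->resv, fence, DMA_RESV_USAGE_WRITE);

error:
	drm_exec_fini(&exec);
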
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 966912053cb0..010239392adf 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -50,16 +50,16 @@ struct file;
* header include loops we need it here for now.
*/
-/* Note that the order of this enum is ABI (it determines
+/* Note that the values of this enum are ABI (it determines
* /dev/dri/renderD* numbers).
*
* Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to
* be implemented before we hit any future
*/
enum drm_minor_type {
- DRM_MINOR_PRIMARY,
- DRM_MINOR_CONTROL,
- DRM_MINOR_RENDER,
+ DRM_MINOR_PRIMARY = 0,
+ DRM_MINOR_CONTROL = 1,
+ DRM_MINOR_RENDER = 2,
DRM_MINOR_ACCEL = 32,
};
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bbc721870c13..bc9f6aa2f3fe 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -36,6 +36,8 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <drm/drm_vma_manager.h>
@@ -380,6 +382,22 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
+ * @gpuva:
+ *
+ * Provides the list of GPU VAs attached to this GEM object.
+ *
+ * Drivers should lock list accesses with the GEMs &dma_resv lock
+ * (&drm_gem_object.resv) or a custom lock if one is provided.
+ */
+ struct {
+ struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *lock_dep_map;
+#endif
+ } gpuva;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -526,4 +544,68 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
int drm_gem_evict(struct drm_gem_object *obj);
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock, but with a custom lock.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+ if (!WARN((obj)->gpuva.lock_dep_map, \
+ "GEM GPUVA lock should be set only once.")) \
+ (obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+ lockdep_assert((obj)->gpuva.lock_dep_map ? \
+ lock_is_held((obj)->gpuva.lock_dep_map) : \
+ dma_resv_held((obj)->resv))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuva list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+ INIT_LIST_HEAD(&obj->gpuva.list);
+}
+
+/**
+ * drm_gem_for_each_gpuva() - iterator to walk over a list of gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuva(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+
+/**
+ * drm_gem_for_each_gpuva_safe() - iterator to safely walk over a list of
+ * gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: &next &drm_gpuva to store the next step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+
#endif /* __DRM_GEM_H__ */
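
A short sketch of how a driver might use the new gpuva list; the bo structure embedding a &drm_gem_object as "base" and its vm_lock mutex are hypothetical driver-side names:

	struct drm_gpuva *va;

	/* At GEM object creation time: */
	drm_gem_gpuva_init(&bo->base);

	/* Only needed when the list is protected by a driver lock instead of
	 * the object's dma-resv lock: */
	drm_gem_gpuva_set_lock(&bo->base, &bo->vm_lock);

	/* Later, with that lock held, e.g. when the backing store moves: */
	drm_gem_for_each_gpuva(va, &bo->base)
		drm_gpuva_invalidate(va, true);
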
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index 8a043235dad8..a827bde494f6 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -166,11 +166,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
* DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
*/
#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = (dumb_create_func), \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table
/**
* DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
@@ -204,11 +201,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
* DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
*/
#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = dumb_create_func, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap
/**
* DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 5994fed5e327..bf0c31aa8fbe 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
struct drm_gem_object base;
/**
- * @pages_lock: Protects the page table and use count
- */
- struct mutex pages_lock;
-
- /**
* @pages: Page table
*/
struct page **pages;
@@ -66,11 +61,6 @@ struct drm_gem_shmem_object {
struct sg_table *sgt;
/**
- * @vmap_lock: Protects the vmap address and use count
- */
- struct mutex vmap_lock;
-
- /**
* @vaddr: Kernel virtual address of the backing memory
*/
void *vaddr;
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
!shmem->base.dma_buf && !shmem->base.import_attach;
}
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -290,10 +278,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
* the &drm_driver structure.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
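
For context, a shmem-based driver's &struct drm_driver now only carries the slimmed-down macro; the PRIME export/import and mmap paths come straight from the DRM core and need no per-driver hooks. A sketch with hypothetical "foo" names:

static const struct drm_driver foo_driver = {
	.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	.fops			= &foo_fops,
	.name			= "foo",
	DRM_GEM_SHMEM_DRIVER_OPS,
};
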
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index f4aab64411d8..e18429f09e53 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -157,12 +157,9 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
- .debugfs_init = drm_vram_mm_debugfs_init, \
- .dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle
+ .debugfs_init = drm_vram_mm_debugfs_init, \
+ .dumb_create = drm_gem_vram_driver_dumb_create, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset
/*
* VRAM memory manager
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuva_mgr.h
new file mode 100644
index 000000000000..ed8d50200cc3
--- /dev/null
+++ b/include/drm/drm_gpuva_mgr.h
@@ -0,0 +1,706 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __DRM_GPUVA_MGR_H__
+#define __DRM_GPUVA_MGR_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include <drm/drm_gem.h>
+
+struct drm_gpuva_manager;
+struct drm_gpuva_fn_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+ /**
+ * @DRM_GPUVA_INVALIDATED:
+ *
+ * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+ */
+ DRM_GPUVA_INVALIDATED = (1 << 0),
+
+ /**
+ * @DRM_GPUVA_SPARSE:
+ *
+ * Flag indicating that the &drm_gpuva is a sparse mapping.
+ */
+ DRM_GPUVA_SPARSE = (1 << 1),
+
+ /**
+ * @DRM_GPUVA_USERBITS: user defined bits
+ */
+ DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuva_manager.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+ /**
+ * @mgr: the &drm_gpuva_manager this object is associated with
+ */
+ struct drm_gpuva_manager *mgr;
+
+ /**
+ * @flags: the &drm_gpuva_flags for this mapping
+ */
+ enum drm_gpuva_flags flags;
+
+ /**
+ * @va: structure containing the address and range of the &drm_gpuva
+ */
+ struct {
+ /**
+ * @addr: the start address
+ */
+ u64 addr;
+
+ /**
+ * @range: the range
+ */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @obj: the mapped &drm_gem_object
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @entry: the &list_head to attach this object to a &drm_gem_object
+ */
+ struct list_head entry;
+ } gem;
+
+ /**
+ * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+ */
+ struct {
+ /**
+		 * @node: the rb-tree node
+ */
+ struct rb_node node;
+
+ /**
+ * @entry: The &list_head to additionally connect &drm_gpuvas
+ * in the same order they appear in the interval tree. This is
+ * useful to keep iterating &drm_gpuvas from a start node found
+ * through the rb-tree while doing modifications on the rb-tree
+ * itself.
+ */
+ struct list_head entry;
+
+ /**
+ * @__subtree_last: needed by the interval tree, holding last-in-subtree
+ */
+ u64 __subtree_last;
+ } rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
+
+bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset)
+{
+ va->va.addr = addr;
+ va->va.range = range;
+ va->gem.obj = obj;
+ va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+ if (invalidate)
+ va->flags |= DRM_GPUVA_INVALIDATED;
+ else
+ va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+ return va->flags & DRM_GPUVA_INVALIDATED;
+}
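
An illustrative sketch of how a driver might embed struct drm_gpuva and use the
helpers above; the names my_gpuva and my_gpuva_alloc are hypothetical and not
part of this API.

	struct my_gpuva {
		struct drm_gpuva base;
		/* driver-private per-mapping state would live here */
	};

	static struct drm_gpuva *my_gpuva_alloc(u64 addr, u64 range,
						struct drm_gem_object *obj, u64 offset)
	{
		struct my_gpuva *uva = kzalloc(sizeof(*uva), GFP_KERNEL);

		if (!uva)
			return NULL;

		drm_gpuva_init(&uva->base, addr, range, obj, offset);
		return &uva->base;
	}

	/* Later, e.g. from a GEM move/eviction notifier, drm_gpuva_invalidate(va, true)
	 * marks the mapping stale and drm_gpuva_invalidated(va) reports it. */
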
+
+/**
+ * struct drm_gpuva_manager - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space using
+ * an interval tree of &drm_gpuva entries. Typically, this structure is embedded in bigger
+ * driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuva_manager {
+ /**
+ * @name: the name of the DRM GPU VA space
+ */
+ const char *name;
+
+ /**
+ * @mm_start: start of the VA space
+ */
+ u64 mm_start;
+
+ /**
+ * @mm_range: length of the VA space
+ */
+ u64 mm_range;
+
+ /**
+ * @rb: structures to track &drm_gpuva entries
+ */
+ struct {
+ /**
+ * @tree: the rb-tree to track GPU VA mappings
+ */
+ struct rb_root_cached tree;
+
+ /**
+ * @list: the &list_head to track GPU VA mappings
+ */
+ struct list_head list;
+ } rb;
+
+ /**
+ * @kernel_alloc_node:
+ *
+ * &drm_gpuva representing the address space cutout reserved for
+ * the kernel
+ */
+ struct drm_gpuva kernel_alloc_node;
+
+ /**
+ * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
+ */
+ const struct drm_gpuva_fn_ops *ops;
+};
+
+void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
+ const char *name,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuva_fn_ops *ops);
+void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
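
A minimal initialization sketch, assuming a hypothetical per-VM object
struct my_vm that embeds the manager; the VA space size, the 4 KiB
kernel-reserved cutout and my_gpuva_ops are made-up example values.

	drm_gpuva_manager_init(&vm->mgr, "example-vm",
			       0, 1ull << 48,	/* managed VA space: start, size */
			       0, SZ_4K,	/* kernel_alloc_node: start, size */
			       &my_gpuva_ops);	/* ops may be NULL if only insert/remove are used */

	/* ... */

	drm_gpuva_manager_destroy(&vm->mgr);
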
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+ if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
+ return list_next_entry(va, rb.entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but uses the &drm_gpuva_manager's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
+ * @kernel_alloc_node.
+ */
+#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = __drm_gpuva_next(va__))
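
Usage sketch for the range iterator; req_addr and req_range stand in for a
hypothetical request.

	struct drm_gpuva *va;

	drm_gpuva_for_each_va_range(va, mgr, req_addr, req_addr + req_range) {
		/* each va overlaps [req_addr, req_addr + req_range) */
	}
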
+
+/**
+ * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
+ * &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * between @start__ and @end__. It is implemented similarly to
+ * list_for_each_safe(), but uses the &drm_gpuva_manager's internal interval
+ * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
+ * against removal of elements. It assumes that @end__ is within (or is the
+ * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
+ * &drm_gpuva_manager's @kernel_alloc_node.
+ */
+#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+ next__ = __drm_gpuva_next(va__); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager.
+ */
+#define drm_gpuva_for_each_va(va__, mgr__) \
+ list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
+ list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ */
+enum drm_gpuva_op_type {
+ /**
+ * @DRM_GPUVA_OP_MAP: the map op type
+ */
+ DRM_GPUVA_OP_MAP,
+
+ /**
+ * @DRM_GPUVA_OP_REMAP: the remap op type
+ */
+ DRM_GPUVA_OP_REMAP,
+
+ /**
+ * @DRM_GPUVA_OP_UNMAP: the unmap op type
+ */
+ DRM_GPUVA_OP_UNMAP,
+
+ /**
+ * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+ */
+ DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+ /**
+ * @va: structure containing address and range of a map
+ * operation
+ */
+ struct {
+ /**
+ * @addr: the base address of the new mapping
+ */
+ u64 addr;
+
+ /**
+ * @range: the range of the new mapping
+ */
+ u64 range;
+ } va;
+
+ /**
+	 * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @obj: the &drm_gem_object to map
+ */
+ struct drm_gem_object *obj;
+ } gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+ /**
+ * @va: the &drm_gpuva to unmap
+ */
+ struct drm_gpuva *va;
+
+ /**
+ * @keep:
+ *
+ * Indicates whether this &drm_gpuva is physically contiguous with the
+ * original mapping request.
+ *
+ * Optionally, if &keep is set, drivers may keep the actual page table
+	 * mappings for this &drm_gpuva, adding only the missing page table
+	 * entries, and update the &drm_gpuva_manager accordingly.
+ */
+ bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existent
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If the new mapping's start address is aligned with the old mapping's start
+ * address, @prev is NULL; likewise, if the new mapping's end address is
+ * aligned with the old mapping's end address, @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+ /**
+ * @prev: the preceding part of a split mapping
+ */
+ struct drm_gpuva_op_map *prev;
+
+ /**
+ * @next: the subsequent part of a split mapping
+ */
+ struct drm_gpuva_op_map *next;
+
+ /**
+ * @unmap: the unmap operation for the original existing mapping
+ */
+ struct drm_gpuva_op_unmap *unmap;
+};
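
A sketch of how a driver's remap step might consume this structure, reusing the
hypothetical struct my_gpuva from the earlier sketch; the actual page table
handling is omitted.

	static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
	{
		struct drm_gpuva_op_map *prev = op->remap.prev;
		struct drm_gpuva_op_map *next = op->remap.next;
		struct my_gpuva *old = container_of(op->remap.unmap->va,
						    struct my_gpuva, base);

		/* driver-specific state in "old" seeds the surviving part(s) */
		pr_debug("split %p: prev? %d, next? %d\n", old, !!prev, !!next);

		/* ... create the new mapping(s) described by @prev and/or @next */
		return 0;
	}
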
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+ /**
+ * @va: the &drm_gpuva to prefetch
+ */
+ struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+ /**
+ * @entry:
+ *
+ * The &list_head used to distribute instances of this struct within
+ * &drm_gpuva_ops.
+ */
+ struct list_head entry;
+
+ /**
+ * @op: the type of the operation
+ */
+ enum drm_gpuva_op_type op;
+
+ union {
+ /**
+ * @map: the map operation
+ */
+ struct drm_gpuva_op_map map;
+
+ /**
+ * @remap: the remap operation
+ */
+ struct drm_gpuva_op_remap remap;
+
+ /**
+ * @unmap: the unmap operation
+ */
+ struct drm_gpuva_op_unmap unmap;
+
+ /**
+ * @prefetch: the prefetch operation
+ */
+ struct drm_gpuva_op_prefetch prefetch;
+ };
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+ /**
+ * @list: the &list_head
+ */
+ struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: another &drm_gpuva_op to store the next step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_entry_safe(), and hence safe against removal of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+ list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+ list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+ list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+ list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+ struct drm_gem_object *obj);
+
+void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op)
+{
+ drm_gpuva_init(va, op->va.addr, op->va.range,
+ op->gem.obj, op->gem.offset);
+}
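
A sketch of the precomputed (ops list) flow under hypothetical names; it
assumes the *_ops_create() helpers return an ERR_PTR on failure.

	static int my_bind(struct drm_gpuva_manager *mgr, u64 addr, u64 range,
			   struct drm_gem_object *obj, u64 offset)
	{
		struct drm_gpuva_ops *ops;
		struct drm_gpuva_op *op;

		ops = drm_gpuva_sm_map_ops_create(mgr, addr, range, obj, offset);
		if (IS_ERR(ops))
			return PTR_ERR(ops);

		drm_gpuva_for_each_op(op, ops) {
			switch (op->op) {
			case DRM_GPUVA_OP_MAP:
				/* allocate a driver gpuva, drm_gpuva_init_from_op() it,
				 * program the page tables, insert it */
				break;
			case DRM_GPUVA_OP_REMAP:
			case DRM_GPUVA_OP_UNMAP:
				/* adjust or tear down the affected existing mappings */
				break;
			default:
				break;
			}
		}

		drm_gpuva_ops_free(mgr, ops);
		return 0;
	}
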
+
+/**
+ * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuva_fn_ops {
+ /**
+ * @op_alloc: called when the &drm_gpuva_manager allocates
+ * a struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuva_op *(*op_alloc)(void);
+
+ /**
+ * @op_free: called when the &drm_gpuva_manager frees a
+ * struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*op_free)(struct drm_gpuva_op *op);
+
+ /**
+ * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+ * mapping once all previous steps were completed
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ *
+	 * Can be NULL if &drm_gpuva_sm_map is not used.
+ */
+ int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_remap: called from &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to split up an existent mapping
+ *
+	 * This callback is called when an existent mapping needs to be split up.
+	 * This is the case when either a newly requested mapping overlaps or
+	 * is enclosed by an existent mapping, or when a partial unmap of an
+	 * existent mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * used.
+ */
+ int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_unmap: called from &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to unmap an existent mapping
+ *
+	 * This callback is called when an existent mapping needs to be unmapped.
+ * This is the case when either a newly requested mapping encloses an
+ * existent mapping or an unmap of an existent mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * used.
+ */
+ int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
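
A sketch of the optional allocation hooks for drivers that embed
struct drm_gpuva_op in a larger (hypothetical) structure; the sm_step_*
callbacks would be filled in the same way.

	struct my_gpuva_op {
		struct drm_gpuva_op base;
		struct list_head job_link;	/* driver-private */
	};

	static struct drm_gpuva_op *my_op_alloc(void)
	{
		struct my_gpuva_op *op = kzalloc(sizeof(*op), GFP_KERNEL);

		return op ? &op->base : NULL;
	}

	static void my_op_free(struct drm_gpuva_op *op)
	{
		kfree(container_of(op, struct my_gpuva_op, base));
	}

	static const struct drm_gpuva_fn_ops my_gpuva_ops = {
		.op_alloc = my_op_alloc,
		.op_free = my_op_free,
		/* .sm_step_map / .sm_step_remap / .sm_step_unmap as needed */
	};
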
+
+int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+ u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+ struct drm_gpuva *next,
+ struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
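
A sketch of the callback-driven (immediate) path: drm_gpuva_sm_map() passes
@priv through to the sm_step_* hooks, which can then use drm_gpuva_map() and
drm_gpuva_link() as shown; struct my_vm, struct my_gpuva and the allocation
policy are hypothetical.

	static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
	{
		struct my_vm *vm = priv;
		struct my_gpuva *uva = kzalloc(sizeof(*uva), GFP_KERNEL);

		if (!uva)
			return -ENOMEM;

		drm_gpuva_map(&vm->mgr, &uva->base, &op->map);
		drm_gpuva_link(&uva->base);
		return 0;
	}

	/* Caller side, e.g. from a VM_BIND ioctl: */
	ret = drm_gpuva_sm_map(&vm->mgr, vm, addr, range, obj, offset);
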
+
+#endif /* __DRM_GPUVA_MGR_H__ */
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index ed013fdcc1ff..ba483c87f0e7 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -3,6 +3,8 @@
#ifndef DRM_KUNIT_HELPERS_H_
#define DRM_KUNIT_HELPERS_H_
+#include <linux/device.h>
+
#include <kunit/test.h>
struct drm_device;
@@ -51,7 +53,7 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
{
struct drm_driver *driver;
- driver = kunit_kzalloc(test, sizeof(*driver), GFP_KERNEL);
+ driver = devm_kzalloc(dev, sizeof(*driver), GFP_KERNEL);
KUNIT_ASSERT_NOT_NULL(test, driver);
driver->driver_features = features;
@@ -87,5 +89,12 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
sizeof(_type), \
offsetof(_type, _member), \
_feat))
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
+
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_modeset_acquire_ctx *ctx);
#endif // DRM_KUNIT_HELPERS_H_
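
A sketch of a KUnit test using the two new helpers; the priv structure and its
drm member are assumed to come from the alloc_drm_device helper earlier in this
header, and both allocations are expected to be cleaned up by the test
framework.

	static void my_atomic_state_test(struct kunit *test)
	{
		struct my_test_priv *priv = test->priv;	/* hypothetical */
		struct drm_modeset_acquire_ctx *ctx;
		struct drm_atomic_state *state;

		ctx = drm_kunit_helper_acquire_ctx_alloc(test);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

		state = drm_kunit_helper_atomic_state_alloc(test, priv->drm, ctx);
		KUNIT_ASSERT_NOT_ERR_OR_NULL(test, state);

		/* build and check the atomic state here */
	}
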
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 965faf082a6d..e3c3ac615909 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -59,8 +59,8 @@ enum mode_set_atomic {
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_crtc_helper_funcs {
/**
@@ -216,9 +216,7 @@ struct drm_crtc_helper_funcs {
*
* This callback is used to update the display mode of a CRTC without
* changing anything of the primary plane configuration. This fits the
- * requirement of atomic and hence is used by the atomic helpers. It is
- * also used by the transitional plane helpers to implement a
- * @mode_set hook in drm_helper_crtc_mode_set().
+ * requirement of atomic and hence is used by the atomic helpers.
*
* Note that the display pipe is completely off when this function is
* called. Atomic drivers which need hardware to be running before they
@@ -333,8 +331,8 @@ struct drm_crtc_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -373,8 +371,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -397,8 +395,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -507,8 +505,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_encoder_helper_funcs {
/**
@@ -1185,8 +1183,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
/**
* struct drm_plane_helper_funcs - helper operations for planes
*
- * These functions are used by the atomic helpers and by the transitional plane
- * helpers.
+ * These functions are used by the atomic helpers.
*/
struct drm_plane_helper_funcs {
/**
@@ -1221,9 +1218,8 @@ struct drm_plane_helper_funcs {
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional. See @begin_fb_access
- * for preparing per-commit resources.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. See @begin_fb_access for preparing per-commit resources.
*
* RETURNS:
*
@@ -1240,8 +1236,8 @@ struct drm_plane_helper_funcs {
* This hook is called to clean up any resources allocated for the given
* framebuffer and plane configuration in @prepare_fb.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*cleanup_fb)(struct drm_plane *plane,
struct drm_plane_state *old_state);
@@ -1295,8 +1291,8 @@ struct drm_plane_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -1326,8 +1322,7 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
@@ -1376,9 +1371,8 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional. It's intended to
- * reverse the effects of @atomic_enable.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. It's intended to reverse the effects of @atomic_enable.
*/
void (*atomic_disable)(struct drm_plane *plane,
struct drm_atomic_state *state);
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 432fab2347eb..10015891b056 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -27,12 +27,14 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/mutex.h>
struct backlight_device;
struct dentry;
struct device_node;
struct drm_connector;
struct drm_device;
+struct drm_panel_follower;
struct drm_panel;
struct display_timing;
@@ -144,6 +146,45 @@ struct drm_panel_funcs {
void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
};
+struct drm_panel_follower_funcs {
+ /**
+ * @panel_prepared:
+ *
+ * Called after the panel has been powered on.
+ */
+ int (*panel_prepared)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_unpreparing:
+ *
+ * Called before the panel is powered off.
+ */
+ int (*panel_unpreparing)(struct drm_panel_follower *follower);
+};
+
+struct drm_panel_follower {
+ /**
+ * @funcs:
+ *
+	 * Dependent device callbacks; should be initialized by the caller.
+ */
+ const struct drm_panel_follower_funcs *funcs;
+
+ /**
+	 * @list:
+ *
+ * Used for linking into panel's list; set by drm_panel_add_follower().
+ */
+ struct list_head list;
+
+ /**
+	 * @panel:
+ *
+ * The panel we're dependent on; set by drm_panel_add_follower().
+ */
+ struct drm_panel *panel;
+};
+
/**
* struct drm_panel - DRM panel object
*/
@@ -190,6 +231,20 @@ struct drm_panel {
struct list_head list;
/**
+ * @followers:
+ *
+ * A list of struct drm_panel_follower dependent on this panel.
+ */
+ struct list_head followers;
+
+ /**
+ * @follower_lock:
+ *
+ * Lock for followers list.
+ */
+ struct mutex follower_lock;
+
+ /**
* @prepare_prev_first:
*
* The previous controller should be prepared first, before the prepare
@@ -198,6 +253,20 @@ struct drm_panel {
* the panel is powered up.
*/
bool prepare_prev_first;
+
+ /**
+ * @prepared:
+ *
+ * If true then the panel has been prepared.
+ */
+ bool prepared;
+
+ /**
+ * @enabled:
+ *
+ * If true then the panel has been enabled.
+ */
+ bool enabled;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
@@ -232,6 +301,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np,
}
#endif
+#if defined(CONFIG_DRM_PANEL)
+bool drm_is_panel_follower(struct device *dev);
+int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+void drm_panel_remove_follower(struct drm_panel_follower *follower);
+int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+#else
+static inline bool drm_is_panel_follower(struct device *dev)
+{
+ return false;
+}
+
+static inline int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+
+static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { }
+static inline int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+#endif
+
#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
(IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
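
A sketch of a panel follower, e.g. a touch controller that must power-sequence
with the panel it sits on; struct my_touch and its power helpers are
hypothetical.

	static int my_touch_panel_prepared(struct drm_panel_follower *follower)
	{
		struct my_touch *ts = container_of(follower, struct my_touch, follower);

		return my_touch_power_on(ts);
	}

	static int my_touch_panel_unpreparing(struct drm_panel_follower *follower)
	{
		struct my_touch *ts = container_of(follower, struct my_touch, follower);

		my_touch_power_off(ts);
		return 0;
	}

	static const struct drm_panel_follower_funcs my_touch_follower_funcs = {
		.panel_prepared = my_touch_panel_prepared,
		.panel_unpreparing = my_touch_panel_unpreparing,
	};

	/* In probe(): */
	ts->follower.funcs = &my_touch_follower_funcs;
	ret = devm_drm_panel_add_follower(dev, &ts->follower);
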
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 51291983ea44..79d62856defb 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -56,7 +56,7 @@ struct drm_plane_state {
/**
* @crtc:
*
- * Currently bound CRTC, NULL if disabled. Do not this write directly,
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
* use drm_atomic_set_crtc_for_plane()
*/
struct drm_crtc *crtc;
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 2a1d01e5b56b..a7abf9f3e697 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -60,19 +60,12 @@ enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
-struct drm_file;
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd, uint32_t *handle);
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle, uint32_t flags,
- int *prime_fd);
-
/* helper functions for exporting */
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
*/
struct list_head cb_list;
/**
- * @lock: Protects &cb_list and write-locks &fence.
+ * @ev_fd_list: List of registered eventfd.
+ */
+ struct list_head ev_fd_list;
+ /**
+ * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
*/
spinlock_t lock;
/**
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 6273cac44e47..96a5d858404b 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
- struct drm_property *property);
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+ struct drm_property *property);
#endif
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
index 087e3f649c52..f6e6ed529681 100644
--- a/include/drm/task_barrier.h
+++ b/include/drm/task_barrier.h
@@ -24,8 +24,8 @@
#include <linux/atomic.h>
/*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
*/
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 8b113c384236..0223a41a64b2 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -355,8 +355,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
diff --git a/include/dt-bindings/arm/qcom,ids.h b/include/dt-bindings/arm/qcom,ids.h
index bcbe9ee2cdaf..be12e1dd1f38 100644
--- a/include/dt-bindings/arm/qcom,ids.h
+++ b/include/dt-bindings/arm/qcom,ids.h
@@ -215,6 +215,7 @@
#define QCOM_ID_SDA429W 437
#define QCOM_ID_SM8350 439
#define QCOM_ID_QCM2290 441
+#define QCOM_ID_SM7125 443
#define QCOM_ID_SM6115 444
#define QCOM_ID_IPQ5010 446
#define QCOM_ID_IPQ5018 447
@@ -249,8 +250,8 @@
#define QCOM_ID_SA8775P 534
#define QCOM_ID_QRU1000 539
#define QCOM_ID_QDU1000 545
+#define QCOM_ID_SM4450 568
#define QCOM_ID_QDU1010 587
-#define QCOM_ID_IPQ5019 569
#define QCOM_ID_QRU1032 588
#define QCOM_ID_QRU1052 589
#define QCOM_ID_QRU1062 590
diff --git a/include/dt-bindings/ata/ahci.h b/include/dt-bindings/ata/ahci.h
index 77997b35612c..b3f3b7cf9af8 100644
--- a/include/dt-bindings/ata/ahci.h
+++ b/include/dt-bindings/ata/ahci.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only or BSD-2-Clause */
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
* This header provides constants for most AHCI bindings.
*/
diff --git a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
index ff2730f398a6..06f198ee7623 100644
--- a/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
+++ b/include/dt-bindings/clock/amlogic,a1-peripherals-clkc.h
@@ -10,6 +10,7 @@
#ifndef __A1_PERIPHERALS_CLKC_H
#define __A1_PERIPHERALS_CLKC_H
+#define CLKID_XTAL_IN 0
#define CLKID_FIXPLL_IN 1
#define CLKID_USB_PHY_IN 2
#define CLKID_USB_CTRL_IN 3
@@ -70,6 +71,8 @@
#define CLKID_CPU_CTRL 58
#define CLKID_ROM 59
#define CLKID_PROC_I2C 60
+#define CLKID_DSPA_SEL 61
+#define CLKID_DSPB_SEL 62
#define CLKID_DSPA_EN 63
#define CLKID_DSPA_EN_NIC 64
#define CLKID_DSPB_EN 65
@@ -81,6 +84,7 @@
#define CLKID_12M 71
#define CLKID_FCLK_DIV2_DIVN 72
#define CLKID_GEN 73
+#define CLKID_SARADC_SEL 74
#define CLKID_SARADC 75
#define CLKID_PWM_A 76
#define CLKID_PWM_B 77
@@ -95,21 +99,70 @@
#define CLKID_SD_EMMC 86
#define CLKID_PSRAM 87
#define CLKID_DMC 88
+#define CLKID_SYS_A_SEL 89
+#define CLKID_SYS_A_DIV 90
+#define CLKID_SYS_A 91
+#define CLKID_SYS_B_SEL 92
+#define CLKID_SYS_B_DIV 93
+#define CLKID_SYS_B 94
#define CLKID_DSPA_A_SEL 95
+#define CLKID_DSPA_A_DIV 96
+#define CLKID_DSPA_A 97
#define CLKID_DSPA_B_SEL 98
+#define CLKID_DSPA_B_DIV 99
+#define CLKID_DSPA_B 100
#define CLKID_DSPB_A_SEL 101
+#define CLKID_DSPB_A_DIV 102
+#define CLKID_DSPB_A 103
#define CLKID_DSPB_B_SEL 104
+#define CLKID_DSPB_B_DIV 105
+#define CLKID_DSPB_B 106
+#define CLKID_RTC_32K_IN 107
+#define CLKID_RTC_32K_DIV 108
+#define CLKID_RTC_32K_XTAL 109
+#define CLKID_RTC_32K_SEL 110
+#define CLKID_CECB_32K_IN 111
+#define CLKID_CECB_32K_DIV 112
#define CLKID_CECB_32K_SEL_PRE 113
#define CLKID_CECB_32K_SEL 114
+#define CLKID_CECA_32K_IN 115
+#define CLKID_CECA_32K_DIV 116
#define CLKID_CECA_32K_SEL_PRE 117
#define CLKID_CECA_32K_SEL 118
+#define CLKID_DIV2_PRE 119
+#define CLKID_24M_DIV2 120
#define CLKID_GEN_SEL 121
+#define CLKID_GEN_DIV 122
+#define CLKID_SARADC_DIV 123
#define CLKID_PWM_A_SEL 124
+#define CLKID_PWM_A_DIV 125
#define CLKID_PWM_B_SEL 126
+#define CLKID_PWM_B_DIV 127
#define CLKID_PWM_C_SEL 128
+#define CLKID_PWM_C_DIV 129
#define CLKID_PWM_D_SEL 130
+#define CLKID_PWM_D_DIV 131
#define CLKID_PWM_E_SEL 132
+#define CLKID_PWM_E_DIV 133
#define CLKID_PWM_F_SEL 134
+#define CLKID_PWM_F_DIV 135
+#define CLKID_SPICC_SEL 136
+#define CLKID_SPICC_DIV 137
+#define CLKID_SPICC_SEL2 138
+#define CLKID_TS_DIV 139
+#define CLKID_SPIFC_SEL 140
+#define CLKID_SPIFC_DIV 141
+#define CLKID_SPIFC_SEL2 142
+#define CLKID_USB_BUS_SEL 143
+#define CLKID_USB_BUS_DIV 144
+#define CLKID_SD_EMMC_SEL 145
+#define CLKID_SD_EMMC_DIV 146
#define CLKID_SD_EMMC_SEL2 147
+#define CLKID_PSRAM_SEL 148
+#define CLKID_PSRAM_DIV 149
+#define CLKID_PSRAM_SEL2 150
+#define CLKID_DMC_SEL 151
+#define CLKID_DMC_DIV 152
+#define CLKID_DMC_SEL2 153
#endif /* __A1_PERIPHERALS_CLKC_H */
diff --git a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
index 01fb8164ac29..2b660c0f2c9f 100644
--- a/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
+++ b/include/dt-bindings/clock/amlogic,a1-pll-clkc.h
@@ -10,7 +10,12 @@
#ifndef __A1_PLL_CLKC_H
#define __A1_PLL_CLKC_H
+#define CLKID_FIXED_PLL_DCO 0
#define CLKID_FIXED_PLL 1
+#define CLKID_FCLK_DIV2_DIV 2
+#define CLKID_FCLK_DIV3_DIV 3
+#define CLKID_FCLK_DIV5_DIV 4
+#define CLKID_FCLK_DIV7_DIV 5
#define CLKID_FCLK_DIV2 6
#define CLKID_FCLK_DIV3 7
#define CLKID_FCLK_DIV5 8
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index e149eee61588..712782177c90 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -90,7 +90,19 @@
/* Only list resets here that are not part of a clock gate + reset pair */
#define ASPEED_RESET_ADC 55
#define ASPEED_RESET_JTAG_MASTER2 54
+
+#define ASPEED_RESET_MAC4 53
+#define ASPEED_RESET_MAC3 52
+
+#define ASPEED_RESET_I3C5 45
+#define ASPEED_RESET_I3C4 44
+#define ASPEED_RESET_I3C3 43
+#define ASPEED_RESET_I3C2 42
+#define ASPEED_RESET_I3C1 41
+#define ASPEED_RESET_I3C0 40
+#define ASPEED_RESET_I3C 39
#define ASPEED_RESET_I3C_DMA 39
+
#define ASPEED_RESET_PWM 37
#define ASPEED_RESET_PECI 36
#define ASPEED_RESET_MII 35
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index f561f5c5ef8f..08c82c22fa5f 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -37,6 +37,26 @@
#define AUD_CLKID_SPDIFIN_CLK 56
#define AUD_CLKID_PDM_DCLK 57
#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_MCLK_SEL 59
+#define AUD_CLKID_MST_B_MCLK_SEL 60
+#define AUD_CLKID_MST_C_MCLK_SEL 61
+#define AUD_CLKID_MST_D_MCLK_SEL 62
+#define AUD_CLKID_MST_E_MCLK_SEL 63
+#define AUD_CLKID_MST_F_MCLK_SEL 64
+#define AUD_CLKID_MST_A_MCLK_DIV 65
+#define AUD_CLKID_MST_B_MCLK_DIV 66
+#define AUD_CLKID_MST_C_MCLK_DIV 67
+#define AUD_CLKID_MST_D_MCLK_DIV 68
+#define AUD_CLKID_MST_E_MCLK_DIV 69
+#define AUD_CLKID_MST_F_MCLK_DIV 70
+#define AUD_CLKID_SPDIFOUT_CLK_SEL 71
+#define AUD_CLKID_SPDIFOUT_CLK_DIV 72
+#define AUD_CLKID_SPDIFIN_CLK_SEL 73
+#define AUD_CLKID_SPDIFIN_CLK_DIV 74
+#define AUD_CLKID_PDM_DCLK_SEL 75
+#define AUD_CLKID_PDM_DCLK_DIV 76
+#define AUD_CLKID_PDM_SYSCLK_SEL 77
+#define AUD_CLKID_PDM_SYSCLK_DIV 78
#define AUD_CLKID_MST_A_SCLK 79
#define AUD_CLKID_MST_B_SCLK 80
#define AUD_CLKID_MST_C_SCLK 81
@@ -49,6 +69,30 @@
#define AUD_CLKID_MST_D_LRCLK 89
#define AUD_CLKID_MST_E_LRCLK 90
#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_MST_A_SCLK_PRE_EN 92
+#define AUD_CLKID_MST_B_SCLK_PRE_EN 93
+#define AUD_CLKID_MST_C_SCLK_PRE_EN 94
+#define AUD_CLKID_MST_D_SCLK_PRE_EN 95
+#define AUD_CLKID_MST_E_SCLK_PRE_EN 96
+#define AUD_CLKID_MST_F_SCLK_PRE_EN 97
+#define AUD_CLKID_MST_A_SCLK_DIV 98
+#define AUD_CLKID_MST_B_SCLK_DIV 99
+#define AUD_CLKID_MST_C_SCLK_DIV 100
+#define AUD_CLKID_MST_D_SCLK_DIV 101
+#define AUD_CLKID_MST_E_SCLK_DIV 102
+#define AUD_CLKID_MST_F_SCLK_DIV 103
+#define AUD_CLKID_MST_A_SCLK_POST_EN 104
+#define AUD_CLKID_MST_B_SCLK_POST_EN 105
+#define AUD_CLKID_MST_C_SCLK_POST_EN 106
+#define AUD_CLKID_MST_D_SCLK_POST_EN 107
+#define AUD_CLKID_MST_E_SCLK_POST_EN 108
+#define AUD_CLKID_MST_F_SCLK_POST_EN 109
+#define AUD_CLKID_MST_A_LRCLK_DIV 110
+#define AUD_CLKID_MST_B_LRCLK_DIV 111
+#define AUD_CLKID_MST_C_LRCLK_DIV 112
+#define AUD_CLKID_MST_D_LRCLK_DIV 113
+#define AUD_CLKID_MST_E_LRCLK_DIV 114
+#define AUD_CLKID_MST_F_LRCLK_DIV 115
#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
@@ -70,8 +114,24 @@
#define AUD_CLKID_TDMOUT_A_LRCLK 134
#define AUD_CLKID_TDMOUT_B_LRCLK 135
#define AUD_CLKID_TDMOUT_C_LRCLK 136
+#define AUD_CLKID_TDMIN_A_SCLK_PRE_EN 137
+#define AUD_CLKID_TDMIN_B_SCLK_PRE_EN 138
+#define AUD_CLKID_TDMIN_C_SCLK_PRE_EN 139
+#define AUD_CLKID_TDMIN_LB_SCLK_PRE_EN 140
+#define AUD_CLKID_TDMOUT_A_SCLK_PRE_EN 141
+#define AUD_CLKID_TDMOUT_B_SCLK_PRE_EN 142
+#define AUD_CLKID_TDMOUT_C_SCLK_PRE_EN 143
+#define AUD_CLKID_TDMIN_A_SCLK_POST_EN 144
+#define AUD_CLKID_TDMIN_B_SCLK_POST_EN 145
+#define AUD_CLKID_TDMIN_C_SCLK_POST_EN 146
+#define AUD_CLKID_TDMIN_LB_SCLK_POST_EN 147
+#define AUD_CLKID_TDMOUT_A_SCLK_POST_EN 148
+#define AUD_CLKID_TDMOUT_B_SCLK_POST_EN 149
+#define AUD_CLKID_TDMOUT_C_SCLK_POST_EN 150
#define AUD_CLKID_SPDIFOUT_B 151
#define AUD_CLKID_SPDIFOUT_B_CLK 152
+#define AUD_CLKID_SPDIFOUT_B_CLK_SEL 153
+#define AUD_CLKID_SPDIFOUT_B_CLK_DIV 154
#define AUD_CLKID_TDM_MCLK_PAD0 155
#define AUD_CLKID_TDM_MCLK_PAD1 156
#define AUD_CLKID_TDM_LRCLK_PAD0 157
@@ -90,5 +150,10 @@
#define AUD_CLKID_FRDDR_D 170
#define AUD_CLKID_TODDR_D 171
#define AUD_CLKID_LOOPBACK_B 172
+#define AUD_CLKID_CLK81_EN 173
+#define AUD_CLKID_SYSCLK_A_DIV 174
+#define AUD_CLKID_SYSCLK_B_DIV 175
+#define AUD_CLKID_SYSCLK_A_EN 176
+#define AUD_CLKID_SYSCLK_B_EN 177
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 93752ea107e3..442162822b88 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -67,23 +69,66 @@
#define CLKID_AO_I2C 58
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
+#define CLKID_SD_EMMC_B_CLK0_SEL 61
+#define CLKID_SD_EMMC_B_CLK0_DIV 62
+#define CLKID_SD_EMMC_C_CLK0_SEL 63
+#define CLKID_SD_EMMC_C_CLK0_DIV 64
+#define CLKID_MPLL0_DIV 65
+#define CLKID_MPLL1_DIV 66
+#define CLKID_MPLL2_DIV 67
+#define CLKID_MPLL3_DIV 68
#define CLKID_HIFI_PLL 69
+#define CLKID_MPLL_PREDIV 70
+#define CLKID_FCLK_DIV2_DIV 71
+#define CLKID_FCLK_DIV3_DIV 72
+#define CLKID_FCLK_DIV4_DIV 73
+#define CLKID_FCLK_DIV5_DIV 74
+#define CLKID_FCLK_DIV7_DIV 75
+#define CLKID_PCIE_PLL 76
+#define CLKID_PCIE_MUX 77
+#define CLKID_PCIE_REF 78
#define CLKID_PCIE_CML_EN0 79
#define CLKID_PCIE_CML_EN1 80
+#define CLKID_GEN_CLK_SEL 82
+#define CLKID_GEN_CLK_DIV 83
#define CLKID_GEN_CLK 84
+#define CLKID_SYS_PLL_DCO 85
+#define CLKID_FIXED_PLL_DCO 86
+#define CLKID_GP0_PLL_DCO 87
+#define CLKID_HIFI_PLL_DCO 88
+#define CLKID_PCIE_PLL_DCO 89
+#define CLKID_PCIE_PLL_OD 90
+#define CLKID_VPU_0_DIV 91
#define CLKID_VPU_0_SEL 92
#define CLKID_VPU_0 93
+#define CLKID_VPU_1_DIV 94
#define CLKID_VPU_1_SEL 95
#define CLKID_VPU_1 96
#define CLKID_VPU 97
+#define CLKID_VAPB_0_DIV 98
#define CLKID_VAPB_0_SEL 99
#define CLKID_VAPB_0 100
+#define CLKID_VAPB_1_DIV 101
#define CLKID_VAPB_1_SEL 102
#define CLKID_VAPB_1 103
#define CLKID_VAPB_SEL 104
#define CLKID_VAPB 105
#define CLKID_VCLK 106
#define CLKID_VCLK2 107
+#define CLKID_VCLK_SEL 108
+#define CLKID_VCLK2_SEL 109
+#define CLKID_VCLK_INPUT 110
+#define CLKID_VCLK2_INPUT 111
+#define CLKID_VCLK_DIV 112
+#define CLKID_VCLK2_DIV 113
+#define CLKID_VCLK_DIV2_EN 114
+#define CLKID_VCLK_DIV4_EN 115
+#define CLKID_VCLK_DIV6_EN 116
+#define CLKID_VCLK_DIV12_EN 117
+#define CLKID_VCLK2_DIV2_EN 118
+#define CLKID_VCLK2_DIV4_EN 119
+#define CLKID_VCLK2_DIV6_EN 120
+#define CLKID_VCLK2_DIV12_EN 121
#define CLKID_VCLK_DIV1 122
#define CLKID_VCLK_DIV2 123
#define CLKID_VCLK_DIV4 124
@@ -94,7 +139,10 @@
#define CLKID_VCLK2_DIV4 129
#define CLKID_VCLK2_DIV6 130
#define CLKID_VCLK2_DIV12 131
+#define CLKID_CTS_ENCL_SEL 132
#define CLKID_CTS_ENCL 133
+#define CLKID_VDIN_MEAS_SEL 134
+#define CLKID_VDIN_MEAS_DIV 135
#define CLKID_VDIN_MEAS 136
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/exynos3250.h b/include/dt-bindings/clock/exynos3250.h
index fe8214017b46..cc7268151843 100644
--- a/include/dt-bindings/clock/exynos3250.h
+++ b/include/dt-bindings/clock/exynos3250.h
@@ -257,12 +257,6 @@
#define CLK_SCLK_MMC2 249
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define CLK_NR_CLKS 250
-
-/*
* CMU DMC
*/
@@ -284,12 +278,6 @@
#define CLK_DIV_DMCD 20
/*
- * Total number of clocks of main CMU.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_DMC 21
-
-/*
* CMU ISP
*/
@@ -344,10 +332,4 @@
#define CLK_ASYNCAXIM 46
#define CLK_SCLK_MPWM_ISP 47
-/*
- * Total number of clocks of CMU_ISP.
- * NOTE: Must be equal to last clock ID increased by one.
- */
-#define NR_CLKS_ISP 48
-
#endif /* _DT_BINDINGS_CLOCK_SAMSUNG_EXYNOS3250_CLOCK_H */
diff --git a/include/dt-bindings/clock/exynos4.h b/include/dt-bindings/clock/exynos4.h
index acbfbab875ec..4ebff79ed9e2 100644
--- a/include/dt-bindings/clock/exynos4.h
+++ b/include/dt-bindings/clock/exynos4.h
@@ -239,9 +239,6 @@
#define CLK_DIV_GDR 460
#define CLK_DIV_CORE2 461
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 462
-
/* Exynos4x12 ISP clocks */
#define CLK_ISP_FIMC_ISP 1
#define CLK_ISP_FIMC_DRC 2
@@ -275,6 +272,4 @@
#define CLK_ISP_DIV_MCUISP0 29
#define CLK_ISP_DIV_MCUISP1 30
-#define CLK_NR_ISP_CLKS 31
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_4_H */
diff --git a/include/dt-bindings/clock/exynos5250.h b/include/dt-bindings/clock/exynos5250.h
index 4680da7357d3..2337c028bbe1 100644
--- a/include/dt-bindings/clock/exynos5250.h
+++ b/include/dt-bindings/clock/exynos5250.h
@@ -177,7 +177,4 @@
#define CLK_MOUT_MPLL 1029
#define CLK_MOUT_VPLLSRC 1030
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 1031
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5250_H */
diff --git a/include/dt-bindings/clock/exynos5260-clk.h b/include/dt-bindings/clock/exynos5260-clk.h
index 98a58cbd81b2..dfde40ea40f0 100644
--- a/include/dt-bindings/clock/exynos5260-clk.h
+++ b/include/dt-bindings/clock/exynos5260-clk.h
@@ -137,8 +137,6 @@
#define PHYCLK_USBHOST20_PHY_CLK48MOHCI 122
#define PHYCLK_USBDRD30_UDRD30_PIPE_PCLK 123
#define PHYCLK_USBDRD30_UDRD30_PHYCLOCK 124
-#define TOP_NR_CLK 125
-
/* List Of Clocks For CMU_EGL */
@@ -153,8 +151,6 @@
#define EGL_DOUT_ACLK_EGL 9
#define EGL_DOUT_EGL2 10
#define EGL_DOUT_EGL1 11
-#define EGL_NR_CLK 12
-
/* List Of Clocks For CMU_KFC */
@@ -168,8 +164,6 @@
#define KFC_DOUT_KFC_ATCLK 8
#define KFC_DOUT_KFC2 9
#define KFC_DOUT_KFC1 10
-#define KFC_NR_CLK 11
-
/* List Of Clocks For CMU_MIF */
@@ -200,8 +194,6 @@
#define MIF_CLK_INTMEM 25
#define MIF_SCLK_LPDDR3PHY_WRAP_U1 26
#define MIF_SCLK_LPDDR3PHY_WRAP_U0 27
-#define MIF_NR_CLK 28
-
/* List Of Clocks For CMU_G3D */
@@ -211,8 +203,6 @@
#define G3D_DOUT_ACLK_G3D 4
#define G3D_CLK_G3D_HPM 5
#define G3D_CLK_G3D 6
-#define G3D_NR_CLK 7
-
/* List Of Clocks For CMU_AUD */
@@ -231,8 +221,6 @@
#define AUD_SCLK_AUD_UART 13
#define AUD_SCLK_PCM 14
#define AUD_SCLK_I2S 15
-#define AUD_NR_CLK 16
-
/* List Of Clocks For CMU_MFC */
@@ -241,8 +229,6 @@
#define MFC_CLK_MFC 3
#define MFC_CLK_SMMU2_MFCM1 4
#define MFC_CLK_SMMU2_MFCM0 5
-#define MFC_NR_CLK 6
-
/* List Of Clocks For CMU_GSCL */
@@ -272,8 +258,6 @@
#define GSCL_CLK_SMMU3_MSCL1 24
#define GSCL_SCLK_CSIS1_WRAP 25
#define GSCL_SCLK_CSIS0_WRAP 26
-#define GSCL_NR_CLK 27
-
/* List Of Clocks For CMU_FSYS */
@@ -295,8 +279,6 @@
#define FSYS_CLK_SMMU_RTIC 16
#define FSYS_PHYCLK_USBDRD30 17
#define FSYS_PHYCLK_USBHOST20 18
-#define FSYS_NR_CLK 19
-
/* List Of Clocks For CMU_PERI */
@@ -366,8 +348,6 @@
#define PERI_SCLK_SPDIF 64
#define PERI_SCLK_I2S 65
#define PERI_SCLK_PCM1 66
-#define PERI_NR_CLK 67
-
/* List Of Clocks For CMU_DISP */
@@ -406,8 +386,6 @@
#define DISP_CLK_DP 33
#define DISP_SCLK_PIXEL 34
#define DISP_MOUT_HDMI_PHY_PIXEL_USER 35
-#define DISP_NR_CLK 36
-
/* List Of Clocks For CMU_G2D */
@@ -423,8 +401,6 @@
#define G2D_CLK_SMMU_SSS 10
#define G2D_CLK_SMMU_MDMA 11
#define G2D_CLK_SMMU3_G2D 12
-#define G2D_NR_CLK 13
-
/* List Of Clocks For CMU_ISP */
@@ -461,6 +437,5 @@
#define ISP_SCLK_SPI0_EXT 31
#define ISP_SCLK_SPI1_EXT 32
#define ISP_SCLK_UART_EXT 33
-#define ISP_NR_CLK 34
#endif
diff --git a/include/dt-bindings/clock/exynos5410.h b/include/dt-bindings/clock/exynos5410.h
index 86c2ad56c5ef..7a1a93f8df6c 100644
--- a/include/dt-bindings/clock/exynos5410.h
+++ b/include/dt-bindings/clock/exynos5410.h
@@ -61,6 +61,4 @@
#define CLK_USBD301 367
#define CLK_SSS 471
-#define CLK_NR_CLKS 512
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5410_H */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 9fffc6ceaadd..73e82527a9e9 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -271,7 +271,4 @@
#define CLK_DOUT_PCLK_DREX0 798
#define CLK_DOUT_PCLK_DREX1 799
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 800
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5420_H */
diff --git a/include/dt-bindings/clock/exynos5433.h b/include/dt-bindings/clock/exynos5433.h
index 25ffa53573a5..d12c1a963fa1 100644
--- a/include/dt-bindings/clock/exynos5433.h
+++ b/include/dt-bindings/clock/exynos5433.h
@@ -188,8 +188,6 @@
#define CLK_SCLK_ISP_SPI0_CAM1 252
#define CLK_SCLK_HDMI_SPDIF_DISP 253
-#define TOP_NR_CLK 254
-
/* CMU_CPIF */
#define CLK_FOUT_MPHY_PLL 1
@@ -200,8 +198,6 @@
#define CLK_SCLK_MPHY_PLL 11
#define CLK_SCLK_UFS_MPHY 11
-#define CPIF_NR_CLK 12
-
/* CMU_MIF */
#define CLK_FOUT_MEM0_PLL 1
#define CLK_FOUT_MEM1_PLL 2
@@ -396,8 +392,6 @@
#define CLK_SCLK_BUS_PLL_APOLLO 199
#define CLK_SCLK_BUS_PLL_ATLAS 200
-#define MIF_NR_CLK 201
-
/* CMU_PERIC */
#define CLK_PCLK_SPI2 1
#define CLK_PCLK_SPI1 2
@@ -468,8 +462,6 @@
#define CLK_DIV_SCLK_SCI 70
#define CLK_DIV_SCLK_SC_IN 71
-#define PERIC_NR_CLK 72
-
/* CMU_PERIS */
#define CLK_PCLK_HPM_APBIF 1
#define CLK_PCLK_TMU1_APBIF 2
@@ -513,8 +505,6 @@
#define CLK_SCLK_ANTIRBK_CNT 40
#define CLK_SCLK_OTP_CON 41
-#define PERIS_NR_CLK 42
-
/* CMU_FSYS */
#define CLK_MOUT_ACLK_FSYS_200_USER 1
#define CLK_MOUT_SCLK_MMC2_USER 2
@@ -621,8 +611,6 @@
#define CLK_SCLK_USBDRD30 114
#define CLK_PCIE 115
-#define FSYS_NR_CLK 116
-
/* CMU_G2D */
#define CLK_MUX_ACLK_G2D_266_USER 1
#define CLK_MUX_ACLK_G2D_400_USER 2
@@ -653,8 +641,6 @@
#define CLK_PCLK_G2D 25
#define CLK_PCLK_SMMU_G2D 26
-#define G2D_NR_CLK 27
-
/* CMU_DISP */
#define CLK_FOUT_DISP_PLL 1
@@ -771,8 +757,6 @@
#define CLK_PHYCLK_MIPIDPHY0_BITCLKDIV8_PHY 114
#define CLK_PHYCLK_MIPIDPHY0_RXCLKESC0_PHY 115
-#define DISP_NR_CLK 116
-
/* CMU_AUD */
#define CLK_MOUT_AUD_PLL_USER 1
#define CLK_MOUT_SCLK_AUD_PCM 2
@@ -824,8 +808,6 @@
#define CLK_SCLK_I2S_BCLK 46
#define CLK_SCLK_AUD_I2S 47
-#define AUD_NR_CLK 48
-
/* CMU_BUS{0|1|2} */
#define CLK_DIV_PCLK_BUS_133 1
@@ -840,8 +822,6 @@
#define CLK_ACLK_BUS2BEND_400 9 /* Only CMU_BUS2 */
#define CLK_ACLK_BUS2RTND_400 10 /* Only CMU_BUS2 */
-#define BUSx_NR_CLK 11
-
/* CMU_G3D */
#define CLK_FOUT_G3D_PLL 1
@@ -865,8 +845,6 @@
#define CLK_PCLK_SYSREG_G3D 18
#define CLK_SCLK_HPM_G3D 19
-#define G3D_NR_CLK 20
-
/* CMU_GSCL */
#define CLK_MOUT_ACLK_GSCL_111_USER 1
#define CLK_MOUT_ACLK_GSCL_333_USER 2
@@ -898,8 +876,6 @@
#define CLK_PCLK_SMMU_GSCL1 27
#define CLK_PCLK_SMMU_GSCL2 28
-#define GSCL_NR_CLK 29
-
/* CMU_APOLLO */
#define CLK_FOUT_APOLLO_PLL 1
@@ -935,8 +911,6 @@
#define CLK_SCLK_HPM_APOLLO 29
#define CLK_SCLK_APOLLO 30
-#define APOLLO_NR_CLK 31
-
/* CMU_ATLAS */
#define CLK_FOUT_ATLAS_PLL 1
@@ -981,8 +955,6 @@
#define CLK_ATCLK 38
#define CLK_SCLK_ATLAS 39
-#define ATLAS_NR_CLK 40
-
/* CMU_MSCL */
#define CLK_MOUT_SCLK_JPEG_USER 1
#define CLK_MOUT_ACLK_MSCL_400_USER 2
@@ -1016,8 +988,6 @@
#define CLK_PCLK_SMMU_JPEG 28
#define CLK_SCLK_JPEG 29
-#define MSCL_NR_CLK 30
-
/* CMU_MFC */
#define CLK_MOUT_ACLK_MFC_400_USER 1
@@ -1040,8 +1010,6 @@
#define CLK_PCLK_SMMU_MFC_1 17
#define CLK_PCLK_SMMU_MFC_0 18
-#define MFC_NR_CLK 19
-
/* CMU_HEVC */
#define CLK_MOUT_ACLK_HEVC_400_USER 1
@@ -1064,8 +1032,6 @@
#define CLK_PCLK_SMMU_HEVC_1 17
#define CLK_PCLK_SMMU_HEVC_0 18
-#define HEVC_NR_CLK 19
-
/* CMU_ISP */
#define CLK_MOUT_ACLK_ISP_DIS_400_USER 1
#define CLK_MOUT_ACLK_ISP_400_USER 2
@@ -1147,8 +1113,6 @@
#define CLK_SCLK_PIXELASYNCS_ISPC 76
#define CLK_SCLK_PIXELASYNCM_ISPC 77
-#define ISP_NR_CLK 78
-
/* CMU_CAM0 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S4_PHY 1
#define CLK_PHYCLK_RXBYTEECLKHS0_S2A_PHY 2
@@ -1285,8 +1249,6 @@
#define CLK_SCLK_PIXELASYNCM_LITE_C_INIT 132
#define CLK_SCLK_PIXELASYNCS_LITE_C_INIT 133
-#define CAM0_NR_CLK 134
-
/* CMU_CAM1 */
#define CLK_PHYCLK_RXBYTEECLKHS0_S2B 1
@@ -1404,12 +1366,8 @@
#define CLK_ATCLK_ISP 111
#define CLK_SCLK_ISP_CA5 112
-#define CAM1_NR_CLK 113
-
/* CMU_IMEM */
#define CLK_ACLK_SLIMSSS 2
#define CLK_PCLK_SLIMSSS 35
-#define IMEM_NR_CLK 36
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOS5433_H */
diff --git a/include/dt-bindings/clock/exynos7885.h b/include/dt-bindings/clock/exynos7885.h
index 8256e7430b63..255e3aa94323 100644
--- a/include/dt-bindings/clock/exynos7885.h
+++ b/include/dt-bindings/clock/exynos7885.h
@@ -69,7 +69,6 @@
#define CLK_GOUT_FSYS_MMC_EMBD 58
#define CLK_GOUT_FSYS_MMC_SDIO 59
#define CLK_GOUT_FSYS_USB30DRD 60
-#define TOP_NR_CLK 61
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -86,7 +85,6 @@
#define CLK_GOUT_TREX_P_CORE_CCLK_P_CORE 12
#define CLK_GOUT_TREX_P_CORE_PCLK 13
#define CLK_GOUT_TREX_P_CORE_PCLK_P_CORE 14
-#define CORE_NR_CLK 15
/* CMU_PERI */
#define CLK_MOUT_PERI_BUS_USER 1
@@ -132,7 +130,6 @@
#define CLK_GOUT_SYSREG_PERI_PCLK 41
#define CLK_GOUT_WDT0_PCLK 42
#define CLK_GOUT_WDT1_PCLK 43
-#define PERI_NR_CLK 44
/* CMU_FSYS */
#define CLK_MOUT_FSYS_BUS_USER 1
@@ -146,6 +143,5 @@
#define CLK_GOUT_MMC_EMBD_SDCLKIN 8
#define CLK_GOUT_MMC_SDIO_ACLK 9
#define CLK_GOUT_MMC_SDIO_SDCLKIN 10
-#define FSYS_NR_CLK 11
#endif /* _DT_BINDINGS_CLOCK_EXYNOS_7885_H */
diff --git a/include/dt-bindings/clock/exynos850.h b/include/dt-bindings/clock/exynos850.h
index afacba338c91..3090e09c9a55 100644
--- a/include/dt-bindings/clock/exynos850.h
+++ b/include/dt-bindings/clock/exynos850.h
@@ -88,7 +88,6 @@
#define CLK_MOUT_G3D_SWITCH 76
#define CLK_GOUT_G3D_SWITCH 77
#define CLK_DOUT_G3D_SWITCH 78
-#define TOP_NR_CLK 79
/* CMU_APM */
#define CLK_RCO_I3C_PMIC 1
@@ -115,7 +114,6 @@
#define CLK_GOUT_GPIO_ALIVE_PCLK 22
#define CLK_GOUT_PMU_ALIVE_PCLK 23
#define CLK_GOUT_SYSREG_APM_PCLK 24
-#define APM_NR_CLK 25
/* CMU_AUD */
#define CLK_DOUT_AUD_AUDIF 1
@@ -179,7 +177,6 @@
#define IOCLK_AUDIOCDCLK6 59
#define TICK_USB 60
#define CLK_GOUT_AUD_CMU_AUD_PCLK 61
-#define AUD_NR_CLK 62
/* CMU_CMGP */
#define CLK_RCO_CMGP 1
@@ -197,7 +194,6 @@
#define CLK_GOUT_CMGP_USI1_IPCLK 13
#define CLK_GOUT_CMGP_USI1_PCLK 14
#define CLK_GOUT_SYSREG_CMGP_PCLK 15
-#define CMGP_NR_CLK 16
/* CMU_G3D */
#define CLK_FOUT_G3D_PLL 1
@@ -212,7 +208,6 @@
#define CLK_GOUT_G3D_BUSD_CLK 10
#define CLK_GOUT_G3D_BUSP_CLK 11
#define CLK_GOUT_G3D_SYSREG_PCLK 12
-#define G3D_NR_CLK 13
/* CMU_HSI */
#define CLK_MOUT_HSI_BUS_USER 1
@@ -231,7 +226,6 @@
#define CLK_GOUT_HSI_PPMU_ACLK 14
#define CLK_GOUT_HSI_PPMU_PCLK 15
#define CLK_GOUT_HSI_CMU_HSI_PCLK 16
-#define HSI_NR_CLK 17
/* CMU_IS */
#define CLK_MOUT_IS_BUS_USER 1
@@ -257,7 +251,6 @@
#define CLK_GOUT_IS_SYSMMU_IS0_CLK 21
#define CLK_GOUT_IS_SYSMMU_IS1_CLK 22
#define CLK_GOUT_IS_SYSREG_PCLK 23
-#define IS_NR_CLK 24
/* CMU_MFCMSCL */
#define CLK_MOUT_MFCMSCL_MFC_USER 1
@@ -275,7 +268,6 @@
#define CLK_GOUT_MFCMSCL_PPMU_PCLK 13
#define CLK_GOUT_MFCMSCL_SYSMMU_CLK 14
#define CLK_GOUT_MFCMSCL_SYSREG_PCLK 15
-#define MFCMSCL_NR_CLK 16
/* CMU_PERI */
#define CLK_MOUT_PERI_BUS_USER 1
@@ -312,7 +304,6 @@
#define CLK_GOUT_UART_PCLK 32
#define CLK_GOUT_WDT0_PCLK 33
#define CLK_GOUT_WDT1_PCLK 34
-#define PERI_NR_CLK 35
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
@@ -329,7 +320,6 @@
#define CLK_GOUT_SSS_PCLK 12
#define CLK_GOUT_GPIO_CORE_PCLK 13
#define CLK_GOUT_SYSREG_CORE_PCLK 14
-#define CORE_NR_CLK 15
/* CMU_DPU */
#define CLK_MOUT_DPU_USER 1
diff --git a/include/dt-bindings/clock/g12a-aoclkc.h b/include/dt-bindings/clock/g12a-aoclkc.h
index e916e49ff288..8fe7712fb12d 100644
--- a/include/dt-bindings/clock/g12a-aoclkc.h
+++ b/include/dt-bindings/clock/g12a-aoclkc.h
@@ -26,10 +26,17 @@
#define CLKID_AO_M4_FCLK 13
#define CLKID_AO_M4_HCLK 14
#define CLKID_AO_CLK81 15
+#define CLKID_AO_SAR_ADC_DIV 17
#define CLKID_AO_SAR_ADC_SEL 16
#define CLKID_AO_SAR_ADC_CLK 18
#define CLKID_AO_CTS_OSCIN 19
+#define CLKID_AO_32K_PRE 20
+#define CLKID_AO_32K_DIV 21
+#define CLKID_AO_32K_SEL 22
#define CLKID_AO_32K 23
+#define CLKID_AO_CEC_PRE 24
+#define CLKID_AO_CEC_DIV 25
+#define CLKID_AO_CEC_SEL 26
#define CLKID_AO_CEC 27
#define CLKID_AO_CTS_RTC_OSCIN 28
diff --git a/include/dt-bindings/clock/g12a-clkc.h b/include/dt-bindings/clock/g12a-clkc.h
index a93b58c5e18e..387767f4e298 100644
--- a/include/dt-bindings/clock/g12a-clkc.h
+++ b/include/dt-bindings/clock/g12a-clkc.h
@@ -16,6 +16,8 @@
#define CLKID_FCLK_DIV5 5
#define CLKID_FCLK_DIV7 6
#define CLKID_GP0_PLL 7
+#define CLKID_MPEG_SEL 8
+#define CLKID_MPEG_DIV 9
#define CLKID_CLK81 10
#define CLKID_MPLL0 11
#define CLKID_MPLL1 12
@@ -69,7 +71,23 @@
#define CLKID_SD_EMMC_A_CLK0 60
#define CLKID_SD_EMMC_B_CLK0 61
#define CLKID_SD_EMMC_C_CLK0 62
+#define CLKID_SD_EMMC_A_CLK0_SEL 63
+#define CLKID_SD_EMMC_A_CLK0_DIV 64
+#define CLKID_SD_EMMC_B_CLK0_SEL 65
+#define CLKID_SD_EMMC_B_CLK0_DIV 66
+#define CLKID_SD_EMMC_C_CLK0_SEL 67
+#define CLKID_SD_EMMC_C_CLK0_DIV 68
+#define CLKID_MPLL0_DIV 69
+#define CLKID_MPLL1_DIV 70
+#define CLKID_MPLL2_DIV 71
+#define CLKID_MPLL3_DIV 72
+#define CLKID_MPLL_PREDIV 73
#define CLKID_HIFI_PLL 74
+#define CLKID_FCLK_DIV2_DIV 75
+#define CLKID_FCLK_DIV3_DIV 76
+#define CLKID_FCLK_DIV4_DIV 77
+#define CLKID_FCLK_DIV5_DIV 78
+#define CLKID_FCLK_DIV7_DIV 79
#define CLKID_VCLK2_VENCI0 80
#define CLKID_VCLK2_VENCI1 81
#define CLKID_VCLK2_VENCP0 82
@@ -90,26 +108,54 @@
#define CLKID_VCLK2_VENCL 97
#define CLKID_VCLK2_OTHER1 98
#define CLKID_FCLK_DIV2P5 99
+#define CLKID_FCLK_DIV2P5_DIV 100
+#define CLKID_FIXED_PLL_DCO 101
+#define CLKID_SYS_PLL_DCO 102
+#define CLKID_GP0_PLL_DCO 103
+#define CLKID_HIFI_PLL_DCO 104
#define CLKID_DMA 105
#define CLKID_EFUSE 106
#define CLKID_ROM_BOOT 107
#define CLKID_RESET_SEC 108
#define CLKID_SEC_AHB_APB3 109
#define CLKID_VPU_0_SEL 110
+#define CLKID_VPU_0_DIV 111
#define CLKID_VPU_0 112
#define CLKID_VPU_1_SEL 113
+#define CLKID_VPU_1_DIV 114
#define CLKID_VPU_1 115
#define CLKID_VPU 116
#define CLKID_VAPB_0_SEL 117
+#define CLKID_VAPB_0_DIV 118
#define CLKID_VAPB_0 119
#define CLKID_VAPB_1_SEL 120
+#define CLKID_VAPB_1_DIV 121
#define CLKID_VAPB_1 122
#define CLKID_VAPB_SEL 123
#define CLKID_VAPB 124
+#define CLKID_HDMI_PLL_DCO 125
+#define CLKID_HDMI_PLL_OD 126
+#define CLKID_HDMI_PLL_OD2 127
#define CLKID_HDMI_PLL 128
#define CLKID_VID_PLL 129
+#define CLKID_VID_PLL_SEL 130
+#define CLKID_VID_PLL_DIV 131
+#define CLKID_VCLK_SEL 132
+#define CLKID_VCLK2_SEL 133
+#define CLKID_VCLK_INPUT 134
+#define CLKID_VCLK2_INPUT 135
+#define CLKID_VCLK_DIV 136
+#define CLKID_VCLK2_DIV 137
#define CLKID_VCLK 138
#define CLKID_VCLK2 139
+#define CLKID_VCLK_DIV2_EN 140
+#define CLKID_VCLK_DIV4_EN 141
+#define CLKID_VCLK_DIV6_EN 142
+#define CLKID_VCLK_DIV12_EN 143
+#define CLKID_VCLK2_DIV2_EN 144
+#define CLKID_VCLK2_DIV4_EN 145
+#define CLKID_VCLK2_DIV6_EN 146
+#define CLKID_VCLK2_DIV12_EN 147
#define CLKID_VCLK_DIV1 148
#define CLKID_VCLK_DIV2 149
#define CLKID_VCLK_DIV4 150
@@ -120,33 +166,117 @@
#define CLKID_VCLK2_DIV4 155
#define CLKID_VCLK2_DIV6 156
#define CLKID_VCLK2_DIV12 157
+#define CLKID_CTS_ENCI_SEL 158
+#define CLKID_CTS_ENCP_SEL 159
+#define CLKID_CTS_VDAC_SEL 160
+#define CLKID_HDMI_TX_SEL 161
#define CLKID_CTS_ENCI 162
#define CLKID_CTS_ENCP 163
#define CLKID_CTS_VDAC 164
#define CLKID_HDMI_TX 165
+#define CLKID_HDMI_SEL 166
+#define CLKID_HDMI_DIV 167
#define CLKID_HDMI 168
#define CLKID_MALI_0_SEL 169
+#define CLKID_MALI_0_DIV 170
#define CLKID_MALI_0 171
#define CLKID_MALI_1_SEL 172
+#define CLKID_MALI_1_DIV 173
#define CLKID_MALI_1 174
#define CLKID_MALI 175
+#define CLKID_MPLL_50M_DIV 176
#define CLKID_MPLL_50M 177
+#define CLKID_SYS_PLL_DIV16_EN 178
+#define CLKID_SYS_PLL_DIV16 179
+#define CLKID_CPU_CLK_DYN0_SEL 180
+#define CLKID_CPU_CLK_DYN0_DIV 181
+#define CLKID_CPU_CLK_DYN0 182
+#define CLKID_CPU_CLK_DYN1_SEL 183
+#define CLKID_CPU_CLK_DYN1_DIV 184
+#define CLKID_CPU_CLK_DYN1 185
+#define CLKID_CPU_CLK_DYN 186
#define CLKID_CPU_CLK 187
+#define CLKID_CPU_CLK_DIV16_EN 188
+#define CLKID_CPU_CLK_DIV16 189
+#define CLKID_CPU_CLK_APB_DIV 190
+#define CLKID_CPU_CLK_APB 191
+#define CLKID_CPU_CLK_ATB_DIV 192
+#define CLKID_CPU_CLK_ATB 193
+#define CLKID_CPU_CLK_AXI_DIV 194
+#define CLKID_CPU_CLK_AXI 195
+#define CLKID_CPU_CLK_TRACE_DIV 196
+#define CLKID_CPU_CLK_TRACE 197
+#define CLKID_PCIE_PLL_DCO 198
+#define CLKID_PCIE_PLL_DCO_DIV2 199
+#define CLKID_PCIE_PLL_OD 200
#define CLKID_PCIE_PLL 201
+#define CLKID_VDEC_1_SEL 202
+#define CLKID_VDEC_1_DIV 203
#define CLKID_VDEC_1 204
+#define CLKID_VDEC_HEVC_SEL 205
+#define CLKID_VDEC_HEVC_DIV 206
#define CLKID_VDEC_HEVC 207
+#define CLKID_VDEC_HEVCF_SEL 208
+#define CLKID_VDEC_HEVCF_DIV 209
#define CLKID_VDEC_HEVCF 210
+#define CLKID_TS_DIV 211
#define CLKID_TS 212
+#define CLKID_SYS1_PLL_DCO 213
+#define CLKID_SYS1_PLL 214
+#define CLKID_SYS1_PLL_DIV16_EN 215
+#define CLKID_SYS1_PLL_DIV16 216
+#define CLKID_CPUB_CLK_DYN0_SEL 217
+#define CLKID_CPUB_CLK_DYN0_DIV 218
+#define CLKID_CPUB_CLK_DYN0 219
+#define CLKID_CPUB_CLK_DYN1_SEL 220
+#define CLKID_CPUB_CLK_DYN1_DIV 221
+#define CLKID_CPUB_CLK_DYN1 222
+#define CLKID_CPUB_CLK_DYN 223
#define CLKID_CPUB_CLK 224
+#define CLKID_CPUB_CLK_DIV16_EN 225
+#define CLKID_CPUB_CLK_DIV16 226
+#define CLKID_CPUB_CLK_DIV2 227
+#define CLKID_CPUB_CLK_DIV3 228
+#define CLKID_CPUB_CLK_DIV4 229
+#define CLKID_CPUB_CLK_DIV5 230
+#define CLKID_CPUB_CLK_DIV6 231
+#define CLKID_CPUB_CLK_DIV7 232
+#define CLKID_CPUB_CLK_DIV8 233
+#define CLKID_CPUB_CLK_APB_SEL 234
+#define CLKID_CPUB_CLK_APB 235
+#define CLKID_CPUB_CLK_ATB_SEL 236
+#define CLKID_CPUB_CLK_ATB 237
+#define CLKID_CPUB_CLK_AXI_SEL 238
+#define CLKID_CPUB_CLK_AXI 239
+#define CLKID_CPUB_CLK_TRACE_SEL 240
+#define CLKID_CPUB_CLK_TRACE 241
+#define CLKID_GP1_PLL_DCO 242
#define CLKID_GP1_PLL 243
+#define CLKID_DSU_CLK_DYN0_SEL 244
+#define CLKID_DSU_CLK_DYN0_DIV 245
+#define CLKID_DSU_CLK_DYN0 246
+#define CLKID_DSU_CLK_DYN1_SEL 247
+#define CLKID_DSU_CLK_DYN1_DIV 248
+#define CLKID_DSU_CLK_DYN1 249
+#define CLKID_DSU_CLK_DYN 250
+#define CLKID_DSU_CLK_FINAL 251
#define CLKID_DSU_CLK 252
#define CLKID_CPU1_CLK 253
#define CLKID_CPU2_CLK 254
#define CLKID_CPU3_CLK 255
+#define CLKID_SPICC0_SCLK_SEL 256
+#define CLKID_SPICC0_SCLK_DIV 257
#define CLKID_SPICC0_SCLK 258
+#define CLKID_SPICC1_SCLK_SEL 259
+#define CLKID_SPICC1_SCLK_DIV 260
#define CLKID_SPICC1_SCLK 261
+#define CLKID_NNA_AXI_CLK_SEL 262
+#define CLKID_NNA_AXI_CLK_DIV 263
#define CLKID_NNA_AXI_CLK 264
+#define CLKID_NNA_CORE_CLK_SEL 265
+#define CLKID_NNA_CORE_CLK_DIV 266
#define CLKID_NNA_CORE_CLK 267
+#define CLKID_MIPI_DSI_PXCLK_DIV 268
#define CLKID_MIPI_DSI_PXCLK_SEL 269
#define CLKID_MIPI_DSI_PXCLK 270
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 4073eb7a9da1..c0ce5e9c4151 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -15,6 +15,8 @@
#define CLKID_FCLK_DIV5 7
#define CLKID_FCLK_DIV7 8
#define CLKID_GP0_PLL 9
+#define CLKID_MPEG_SEL 10
+#define CLKID_MPEG_DIV 11
#define CLKID_CLK81 12
#define CLKID_MPLL0 13
#define CLKID_MPLL1 14
@@ -102,35 +104,92 @@
#define CLKID_SD_EMMC_C 96
#define CLKID_SAR_ADC_CLK 97
#define CLKID_SAR_ADC_SEL 98
+#define CLKID_SAR_ADC_DIV 99
#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0_DIV 101
#define CLKID_MALI_0 102
#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1_DIV 104
#define CLKID_MALI_1 105
#define CLKID_MALI 106
#define CLKID_CTS_AMCLK 107
+#define CLKID_CTS_AMCLK_SEL 108
+#define CLKID_CTS_AMCLK_DIV 109
#define CLKID_CTS_MCLK_I958 110
+#define CLKID_CTS_MCLK_I958_SEL 111
+#define CLKID_CTS_MCLK_I958_DIV 112
#define CLKID_CTS_I958 113
#define CLKID_32K_CLK 114
+#define CLKID_32K_CLK_SEL 115
+#define CLKID_32K_CLK_DIV 116
+#define CLKID_SD_EMMC_A_CLK0_SEL 117
+#define CLKID_SD_EMMC_A_CLK0_DIV 118
#define CLKID_SD_EMMC_A_CLK0 119
+#define CLKID_SD_EMMC_B_CLK0_SEL 120
+#define CLKID_SD_EMMC_B_CLK0_DIV 121
#define CLKID_SD_EMMC_B_CLK0 122
+#define CLKID_SD_EMMC_C_CLK0_SEL 123
+#define CLKID_SD_EMMC_C_CLK0_DIV 124
#define CLKID_SD_EMMC_C_CLK0 125
#define CLKID_VPU_0_SEL 126
+#define CLKID_VPU_0_DIV 127
#define CLKID_VPU_0 128
#define CLKID_VPU_1_SEL 129
+#define CLKID_VPU_1_DIV 130
#define CLKID_VPU_1 131
#define CLKID_VPU 132
#define CLKID_VAPB_0_SEL 133
+#define CLKID_VAPB_0_DIV 134
#define CLKID_VAPB_0 135
#define CLKID_VAPB_1_SEL 136
+#define CLKID_VAPB_1_DIV 137
#define CLKID_VAPB_1 138
#define CLKID_VAPB_SEL 139
#define CLKID_VAPB 140
+#define CLKID_HDMI_PLL_PRE_MULT 141
+#define CLKID_MPLL0_DIV 142
+#define CLKID_MPLL1_DIV 143
+#define CLKID_MPLL2_DIV 144
+#define CLKID_MPLL_PREDIV 145
+#define CLKID_FCLK_DIV2_DIV 146
+#define CLKID_FCLK_DIV3_DIV 147
+#define CLKID_FCLK_DIV4_DIV 148
+#define CLKID_FCLK_DIV5_DIV 149
+#define CLKID_FCLK_DIV7_DIV 150
+#define CLKID_VDEC_1_SEL 151
+#define CLKID_VDEC_1_DIV 152
#define CLKID_VDEC_1 153
+#define CLKID_VDEC_HEVC_SEL 154
+#define CLKID_VDEC_HEVC_DIV 155
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK_SEL 157
+#define CLKID_GEN_CLK_DIV 158
#define CLKID_GEN_CLK 159
+#define CLKID_FIXED_PLL_DCO 160
+#define CLKID_HDMI_PLL_DCO 161
+#define CLKID_HDMI_PLL_OD 162
+#define CLKID_HDMI_PLL_OD2 163
+#define CLKID_SYS_PLL_DCO 164
+#define CLKID_GP0_PLL_DCO 165
#define CLKID_VID_PLL 166
+#define CLKID_VID_PLL_SEL 167
+#define CLKID_VID_PLL_DIV 168
+#define CLKID_VCLK_SEL 169
+#define CLKID_VCLK2_SEL 170
+#define CLKID_VCLK_INPUT 171
+#define CLKID_VCLK2_INPUT 172
+#define CLKID_VCLK_DIV 173
+#define CLKID_VCLK2_DIV 174
#define CLKID_VCLK 175
#define CLKID_VCLK2 176
+#define CLKID_VCLK_DIV2_EN 177
+#define CLKID_VCLK_DIV4_EN 178
+#define CLKID_VCLK_DIV6_EN 179
+#define CLKID_VCLK_DIV12_EN 180
+#define CLKID_VCLK2_DIV2_EN 181
+#define CLKID_VCLK2_DIV4_EN 182
+#define CLKID_VCLK2_DIV6_EN 183
+#define CLKID_VCLK2_DIV12_EN 184
#define CLKID_VCLK_DIV1 185
#define CLKID_VCLK_DIV2 186
#define CLKID_VCLK_DIV4 187
@@ -141,10 +200,16 @@
#define CLKID_VCLK2_DIV4 192
#define CLKID_VCLK2_DIV6 193
#define CLKID_VCLK2_DIV12 194
+#define CLKID_CTS_ENCI_SEL 195
+#define CLKID_CTS_ENCP_SEL 196
+#define CLKID_CTS_VDAC_SEL 197
+#define CLKID_HDMI_TX_SEL 198
#define CLKID_CTS_ENCI 199
#define CLKID_CTS_ENCP 200
#define CLKID_CTS_VDAC 201
#define CLKID_HDMI_TX 202
+#define CLKID_HDMI_SEL 203
+#define CLKID_HDMI_DIV 204
#define CLKID_HDMI 205
#define CLKID_ACODEC 206
diff --git a/include/dt-bindings/clock/hi3559av100-clock.h b/include/dt-bindings/clock/hi3559av100-clock.h
index 5fe7689010a0..a4f0e997546c 100644
--- a/include/dt-bindings/clock/hi3559av100-clock.h
+++ b/include/dt-bindings/clock/hi3559av100-clock.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later or BSD-2-Clause */
+/* SPDX-License-Identifier: GPL-2.0-or-later OR BSD-2-Clause */
/*
* Copyright (c) 2019-2020, Huawei Tech. Co., Ltd.
*
diff --git a/include/dt-bindings/clock/imx8-clock.h b/include/dt-bindings/clock/imx8-clock.h
index 2e60ce4d2622..2242ff54fc5e 100644
--- a/include/dt-bindings/clock/imx8-clock.h
+++ b/include/dt-bindings/clock/imx8-clock.h
@@ -164,4 +164,32 @@
#define IMX_ADMA_LPCG_CLK_END 45
+#define IMX_ADMA_ACM_AUD_CLK0_SEL 0
+#define IMX_ADMA_ACM_AUD_CLK1_SEL 1
+#define IMX_ADMA_ACM_MCLKOUT0_SEL 2
+#define IMX_ADMA_ACM_MCLKOUT1_SEL 3
+#define IMX_ADMA_ACM_ESAI0_MCLK_SEL 4
+#define IMX_ADMA_ACM_ESAI1_MCLK_SEL 5
+#define IMX_ADMA_ACM_GPT0_MUX_CLK_SEL 6
+#define IMX_ADMA_ACM_GPT1_MUX_CLK_SEL 7
+#define IMX_ADMA_ACM_GPT2_MUX_CLK_SEL 8
+#define IMX_ADMA_ACM_GPT3_MUX_CLK_SEL 9
+#define IMX_ADMA_ACM_GPT4_MUX_CLK_SEL 10
+#define IMX_ADMA_ACM_GPT5_MUX_CLK_SEL 11
+#define IMX_ADMA_ACM_SAI0_MCLK_SEL 12
+#define IMX_ADMA_ACM_SAI1_MCLK_SEL 13
+#define IMX_ADMA_ACM_SAI2_MCLK_SEL 14
+#define IMX_ADMA_ACM_SAI3_MCLK_SEL 15
+#define IMX_ADMA_ACM_SAI4_MCLK_SEL 16
+#define IMX_ADMA_ACM_SAI5_MCLK_SEL 17
+#define IMX_ADMA_ACM_SAI6_MCLK_SEL 18
+#define IMX_ADMA_ACM_SAI7_MCLK_SEL 19
+#define IMX_ADMA_ACM_SPDIF0_TX_CLK_SEL 20
+#define IMX_ADMA_ACM_SPDIF1_TX_CLK_SEL 21
+#define IMX_ADMA_ACM_MQS_TX_CLK_SEL 22
+#define IMX_ADMA_ACM_ASRC0_MUX_CLK_SEL 23
+#define IMX_ADMA_ACM_ASRC1_MUX_CLK_SEL 24
+
+#define IMX_ADMA_ACM_CLK_END 25
+
#endif /* __DT_BINDINGS_CLOCK_IMX_H */
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
index 3f28ce685f41..11cb0a4fe999 100644
--- a/include/dt-bindings/clock/imx8mp-clock.h
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -130,7 +130,7 @@
#define IMX8MP_CLK_SAI1 123
#define IMX8MP_CLK_SAI2 124
#define IMX8MP_CLK_SAI3 125
-#define IMX8MP_CLK_SAI4 126
+/* #define IMX8MP_CLK_SAI4 126 */
#define IMX8MP_CLK_SAI5 127
#define IMX8MP_CLK_SAI6 128
#define IMX8MP_CLK_ENET_QOS 129
diff --git a/include/dt-bindings/clock/imx93-clock.h b/include/dt-bindings/clock/imx93-clock.h
index 35a1f62053a5..787c9e74dc96 100644
--- a/include/dt-bindings/clock/imx93-clock.h
+++ b/include/dt-bindings/clock/imx93-clock.h
@@ -203,6 +203,7 @@
#define IMX93_CLK_ARM_PLL 198
#define IMX93_CLK_A55_SEL 199
#define IMX93_CLK_A55_CORE 200
-#define IMX93_CLK_END 201
+#define IMX93_CLK_PDM_IPG 201
+#define IMX93_CLK_END 202
#endif
diff --git a/include/dt-bindings/clock/intel,agilex5-clkmgr.h b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
new file mode 100644
index 000000000000..2f3a23b31c5c
--- /dev/null
+++ b/include/dt-bindings/clock/intel,agilex5-clkmgr.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
+/*
+ * Copyright (C) 2023, Intel Corporation
+ */
+
+#ifndef __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+#define __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H
+
+/* fixed rate clocks */
+#define AGILEX5_OSC1 0
+#define AGILEX5_CB_INTOSC_HS_DIV2_CLK 1
+#define AGILEX5_CB_INTOSC_LS_CLK 2
+#define AGILEX5_F2S_FREE_CLK 3
+
+/* PLL clocks */
+#define AGILEX5_MAIN_PLL_CLK 4
+#define AGILEX5_MAIN_PLL_C0_CLK 5
+#define AGILEX5_MAIN_PLL_C1_CLK 6
+#define AGILEX5_MAIN_PLL_C2_CLK 7
+#define AGILEX5_MAIN_PLL_C3_CLK 8
+#define AGILEX5_PERIPH_PLL_CLK 9
+#define AGILEX5_PERIPH_PLL_C0_CLK 10
+#define AGILEX5_PERIPH_PLL_C1_CLK 11
+#define AGILEX5_PERIPH_PLL_C2_CLK 12
+#define AGILEX5_PERIPH_PLL_C3_CLK 13
+#define AGILEX5_CORE0_FREE_CLK 14
+#define AGILEX5_CORE1_FREE_CLK 15
+#define AGILEX5_CORE2_FREE_CLK 16
+#define AGILEX5_CORE3_FREE_CLK 17
+#define AGILEX5_DSU_FREE_CLK 18
+#define AGILEX5_BOOT_CLK 19
+
+/* fixed factor clocks */
+#define AGILEX5_L3_MAIN_FREE_CLK 20
+#define AGILEX5_NOC_FREE_CLK 21
+#define AGILEX5_S2F_USR0_CLK 22
+#define AGILEX5_NOC_CLK 23
+#define AGILEX5_EMAC_A_FREE_CLK 24
+#define AGILEX5_EMAC_B_FREE_CLK 25
+#define AGILEX5_EMAC_PTP_FREE_CLK 26
+#define AGILEX5_GPIO_DB_FREE_CLK 27
+#define AGILEX5_S2F_USER0_FREE_CLK 28
+#define AGILEX5_S2F_USER1_FREE_CLK 29
+#define AGILEX5_PSI_REF_FREE_CLK 30
+#define AGILEX5_USB31_FREE_CLK 31
+
+/* Gate clocks */
+#define AGILEX5_CORE0_CLK 32
+#define AGILEX5_CORE1_CLK 33
+#define AGILEX5_CORE2_CLK 34
+#define AGILEX5_CORE3_CLK 35
+#define AGILEX5_MPU_CLK 36
+#define AGILEX5_MPU_PERIPH_CLK 37
+#define AGILEX5_MPU_CCU_CLK 38
+#define AGILEX5_L4_MAIN_CLK 39
+#define AGILEX5_L4_MP_CLK 40
+#define AGILEX5_L4_SYS_FREE_CLK 41
+#define AGILEX5_L4_SP_CLK 42
+#define AGILEX5_CS_AT_CLK 43
+#define AGILEX5_CS_TRACE_CLK 44
+#define AGILEX5_CS_PDBG_CLK 45
+#define AGILEX5_EMAC0_CLK 46
+#define AGILEX5_EMAC1_CLK 47
+#define AGILEX5_EMAC2_CLK 48
+#define AGILEX5_EMAC_PTP_CLK 49
+#define AGILEX5_GPIO_DB_CLK 50
+#define AGILEX5_S2F_USER0_CLK 51
+#define AGILEX5_S2F_USER1_CLK 52
+#define AGILEX5_PSI_REF_CLK 53
+#define AGILEX5_USB31_SUSPEND_CLK 54
+#define AGILEX5_USB31_BUS_CLK_EARLY 55
+#define AGILEX5_USB2OTG_HCLK 56
+#define AGILEX5_SPIM_0_CLK 57
+#define AGILEX5_SPIM_1_CLK 58
+#define AGILEX5_SPIS_0_CLK 59
+#define AGILEX5_SPIS_1_CLK 60
+#define AGILEX5_DMA_CORE_CLK 61
+#define AGILEX5_DMA_HS_CLK 62
+#define AGILEX5_I3C_0_CORE_CLK 63
+#define AGILEX5_I3C_1_CORE_CLK 64
+#define AGILEX5_I2C_0_PCLK 65
+#define AGILEX5_I2C_1_PCLK 66
+#define AGILEX5_I2C_EMAC0_PCLK 67
+#define AGILEX5_I2C_EMAC1_PCLK 68
+#define AGILEX5_I2C_EMAC2_PCLK 69
+#define AGILEX5_UART_0_PCLK 70
+#define AGILEX5_UART_1_PCLK 71
+#define AGILEX5_SPTIMER_0_PCLK 72
+#define AGILEX5_SPTIMER_1_PCLK 73
+#define AGILEX5_DFI_CLK 74
+#define AGILEX5_NAND_NF_CLK 75
+#define AGILEX5_NAND_BCH_CLK 76
+#define AGILEX5_SDMMC_SDPHY_REG_CLK 77
+#define AGILEX5_SDMCLK 78
+#define AGILEX5_SOFTPHY_REG_PCLK 79
+#define AGILEX5_SOFTPHY_PHY_CLK 80
+#define AGILEX5_SOFTPHY_CTRL_CLK 81
+#define AGILEX5_NUM_CLKS 82
+
+#endif /* __DT_BINDINGS_INTEL_AGILEX5_CLKMGR_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2-audio.h b/include/dt-bindings/clock/marvell,mmp2-audio.h
index 20664776f497..9653e04dedc3 100644
--- a/include/dt-bindings/clock/marvell,mmp2-audio.h
+++ b/include/dt-bindings/clock/marvell,mmp2-audio.h
@@ -6,5 +6,4 @@
#define MMP2_CLK_AUDIO_SSPA0 1
#define MMP2_CLK_AUDIO_SSPA1 2
-#define MMP2_CLK_AUDIO_NR_CLKS 3
#endif
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index f0819d66b230..88c2d716476f 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -91,5 +91,4 @@
#define MMP3_CLK_SDH4 126
#define MMP2_CLK_AUDIO 127
-#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa168.h b/include/dt-bindings/clock/marvell,pxa168.h
index c92d969ae941..d1bb59187e1d 100644
--- a/include/dt-bindings/clock/marvell,pxa168.h
+++ b/include/dt-bindings/clock/marvell,pxa168.h
@@ -63,5 +63,4 @@
#define PXA168_CLK_SDH01_AXI 111
#define PXA168_CLK_SDH23_AXI 112
-#define PXA168_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa1928.h b/include/dt-bindings/clock/marvell,pxa1928.h
index 5dca4820297f..0c708d3d3314 100644
--- a/include/dt-bindings/clock/marvell,pxa1928.h
+++ b/include/dt-bindings/clock/marvell,pxa1928.h
@@ -36,7 +36,6 @@
#define PXA1928_CLK_THSENS_CPU 0x26
#define PXA1928_CLK_THSENS_VPU 0x27
#define PXA1928_CLK_THSENS_GC 0x28
-#define PXA1928_APBC_NR_CLKS 0x30
/* axi peripherals */
@@ -53,6 +52,4 @@
#define PXA1928_CLK_GC3D 0x5d
#define PXA1928_CLK_GC2D 0x5f
-#define PXA1928_APMU_NR_CLKS 0x60
-
#endif
diff --git a/include/dt-bindings/clock/marvell,pxa910.h b/include/dt-bindings/clock/marvell,pxa910.h
index c9018ab354d0..6caa231de0c1 100644
--- a/include/dt-bindings/clock/marvell,pxa910.h
+++ b/include/dt-bindings/clock/marvell,pxa910.h
@@ -55,5 +55,4 @@
#define PXA910_CLK_CCIC0_PHY 108
#define PXA910_CLK_CCIC0_SPHY 109
-#define PXA910_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/meson8b-clkc.h b/include/dt-bindings/clock/meson8b-clkc.h
index 78aa07fd7cc0..385bf243c56c 100644
--- a/include/dt-bindings/clock/meson8b-clkc.h
+++ b/include/dt-bindings/clock/meson8b-clkc.h
@@ -100,29 +100,126 @@
#define CLKID_MPLL0 93
#define CLKID_MPLL1 94
#define CLKID_MPLL2 95
+#define CLKID_MPLL0_DIV 96
+#define CLKID_MPLL1_DIV 97
+#define CLKID_MPLL2_DIV 98
+#define CLKID_CPU_IN_SEL 99
+#define CLKID_CPU_IN_DIV2 100
+#define CLKID_CPU_IN_DIV3 101
+#define CLKID_CPU_SCALE_DIV 102
+#define CLKID_CPU_SCALE_OUT_SEL 103
+#define CLKID_MPLL_PREDIV 104
+#define CLKID_FCLK_DIV2_DIV 105
+#define CLKID_FCLK_DIV3_DIV 106
+#define CLKID_FCLK_DIV4_DIV 107
+#define CLKID_FCLK_DIV5_DIV 108
+#define CLKID_FCLK_DIV7_DIV 109
+#define CLKID_NAND_SEL 110
+#define CLKID_NAND_DIV 111
#define CLKID_NAND_CLK 112
+#define CLKID_PLL_FIXED_DCO 113
+#define CLKID_HDMI_PLL_DCO 114
+#define CLKID_PLL_SYS_DCO 115
+#define CLKID_CPU_CLK_DIV2 116
+#define CLKID_CPU_CLK_DIV3 117
+#define CLKID_CPU_CLK_DIV4 118
+#define CLKID_CPU_CLK_DIV5 119
+#define CLKID_CPU_CLK_DIV6 120
+#define CLKID_CPU_CLK_DIV7 121
+#define CLKID_CPU_CLK_DIV8 122
+#define CLKID_APB_SEL 123
#define CLKID_APB 124
+#define CLKID_PERIPH_SEL 125
#define CLKID_PERIPH 126
+#define CLKID_AXI_SEL 127
#define CLKID_AXI 128
+#define CLKID_L2_DRAM_SEL 129
#define CLKID_L2_DRAM 130
+#define CLKID_HDMI_PLL_LVDS_OUT 131
#define CLKID_HDMI_PLL_HDMI_OUT 132
+#define CLKID_VID_PLL_IN_SEL 133
+#define CLKID_VID_PLL_IN_EN 134
+#define CLKID_VID_PLL_PRE_DIV 135
+#define CLKID_VID_PLL_POST_DIV 136
#define CLKID_VID_PLL_FINAL_DIV 137
#define CLKID_VCLK_IN_SEL 138
+#define CLKID_VCLK_IN_EN 139
+#define CLKID_VCLK_DIV1 140
+#define CLKID_VCLK_DIV2_DIV 141
+#define CLKID_VCLK_DIV2 142
+#define CLKID_VCLK_DIV4_DIV 143
+#define CLKID_VCLK_DIV4 144
+#define CLKID_VCLK_DIV6_DIV 145
+#define CLKID_VCLK_DIV6 146
+#define CLKID_VCLK_DIV12_DIV 147
+#define CLKID_VCLK_DIV12 148
#define CLKID_VCLK2_IN_SEL 149
+#define CLKID_VCLK2_IN_EN 150
+#define CLKID_VCLK2_DIV1 151
+#define CLKID_VCLK2_DIV2_DIV 152
+#define CLKID_VCLK2_DIV2 153
+#define CLKID_VCLK2_DIV4_DIV 154
+#define CLKID_VCLK2_DIV4 155
+#define CLKID_VCLK2_DIV6_DIV 156
+#define CLKID_VCLK2_DIV6 157
+#define CLKID_VCLK2_DIV12_DIV 158
+#define CLKID_VCLK2_DIV12 159
+#define CLKID_CTS_ENCT_SEL 160
#define CLKID_CTS_ENCT 161
+#define CLKID_CTS_ENCP_SEL 162
#define CLKID_CTS_ENCP 163
+#define CLKID_CTS_ENCI_SEL 164
#define CLKID_CTS_ENCI 165
+#define CLKID_HDMI_TX_PIXEL_SEL 166
#define CLKID_HDMI_TX_PIXEL 167
+#define CLKID_CTS_ENCL_SEL 168
#define CLKID_CTS_ENCL 169
+#define CLKID_CTS_VDAC0_SEL 170
#define CLKID_CTS_VDAC0 171
+#define CLKID_HDMI_SYS_SEL 172
+#define CLKID_HDMI_SYS_DIV 173
#define CLKID_HDMI_SYS 174
+#define CLKID_MALI_0_SEL 175
+#define CLKID_MALI_0_DIV 176
+#define CLKID_MALI_0 177
+#define CLKID_MALI_1_SEL 178
+#define CLKID_MALI_1_DIV 179
+#define CLKID_MALI_1 180
+#define CLKID_GP_PLL_DCO 181
+#define CLKID_GP_PLL 182
+#define CLKID_VPU_0_SEL 183
+#define CLKID_VPU_0_DIV 184
+#define CLKID_VPU_0 185
+#define CLKID_VPU_1_SEL 186
+#define CLKID_VPU_1_DIV 187
+#define CLKID_VPU_1 189
#define CLKID_VPU 190
+#define CLKID_VDEC_1_SEL 191
+#define CLKID_VDEC_1_1_DIV 192
+#define CLKID_VDEC_1_1 193
+#define CLKID_VDEC_1_2_DIV 194
+#define CLKID_VDEC_1_2 195
#define CLKID_VDEC_1 196
+#define CLKID_VDEC_HCODEC_SEL 197
+#define CLKID_VDEC_HCODEC_DIV 198
#define CLKID_VDEC_HCODEC 199
+#define CLKID_VDEC_2_SEL 200
+#define CLKID_VDEC_2_DIV 201
#define CLKID_VDEC_2 202
+#define CLKID_VDEC_HEVC_SEL 203
+#define CLKID_VDEC_HEVC_DIV 204
+#define CLKID_VDEC_HEVC_EN 205
#define CLKID_VDEC_HEVC 206
+#define CLKID_CTS_AMCLK_SEL 207
+#define CLKID_CTS_AMCLK_DIV 208
#define CLKID_CTS_AMCLK 209
+#define CLKID_CTS_MCLK_I958_SEL 210
+#define CLKID_CTS_MCLK_I958_DIV 211
#define CLKID_CTS_MCLK_I958 212
#define CLKID_CTS_I958 213
+#define CLKID_VCLK_EN 214
+#define CLKID_VCLK2_EN 215
+#define CLKID_VID_PLL_LVDS_EN 216
+#define CLKID_HDMI_PLL_DCO_IN 217
#endif /* __MESON8B_CLKC_H */
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq4019.h b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
index 7e8a7be6dcda..fa0587857547 100644
--- a/include/dt-bindings/clock/qcom,gcc-ipq4019.h
+++ b/include/dt-bindings/clock/qcom,gcc-ipq4019.h
@@ -165,5 +165,11 @@
#define GCC_QDSS_BCR 69
#define GCC_MPM_BCR 70
#define GCC_SPDM_BCR 71
+#define ESS_MAC1_ARES 72
+#define ESS_MAC2_ARES 73
+#define ESS_MAC3_ARES 74
+#define ESS_MAC4_ARES 75
+#define ESS_MAC5_ARES 76
+#define ESS_PSGMII_ARES 77
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq5018.h b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..f3de2fdfeea1
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq5018.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_5018_H
+
+#define GPLL0_MAIN 0
+#define GPLL0 1
+#define GPLL2_MAIN 2
+#define GPLL2 3
+#define GPLL4_MAIN 4
+#define GPLL4 5
+#define UBI32_PLL_MAIN 6
+#define UBI32_PLL 7
+#define ADSS_PWM_CLK_SRC 8
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 9
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 10
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 11
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 12
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 13
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 14
+#define BLSP1_UART1_APPS_CLK_SRC 15
+#define BLSP1_UART2_APPS_CLK_SRC 16
+#define CRYPTO_CLK_SRC 17
+#define GCC_ADSS_PWM_CLK 18
+#define GCC_BLSP1_AHB_CLK 19
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 20
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 21
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 22
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 23
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 24
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 25
+#define GCC_BLSP1_UART1_APPS_CLK 26
+#define GCC_BLSP1_UART2_APPS_CLK 27
+#define GCC_BTSS_LPO_CLK 28
+#define GCC_CMN_BLK_AHB_CLK 29
+#define GCC_CMN_BLK_SYS_CLK 30
+#define GCC_CRYPTO_AHB_CLK 31
+#define GCC_CRYPTO_AXI_CLK 32
+#define GCC_CRYPTO_CLK 33
+#define GCC_CRYPTO_PPE_CLK 34
+#define GCC_DCC_CLK 35
+#define GCC_GEPHY_RX_CLK 36
+#define GCC_GEPHY_TX_CLK 37
+#define GCC_GMAC0_CFG_CLK 38
+#define GCC_GMAC0_PTP_CLK 39
+#define GCC_GMAC0_RX_CLK 40
+#define GCC_GMAC0_SYS_CLK 41
+#define GCC_GMAC0_TX_CLK 42
+#define GCC_GMAC1_CFG_CLK 43
+#define GCC_GMAC1_PTP_CLK 44
+#define GCC_GMAC1_RX_CLK 45
+#define GCC_GMAC1_SYS_CLK 46
+#define GCC_GMAC1_TX_CLK 47
+#define GCC_GP1_CLK 48
+#define GCC_GP2_CLK 49
+#define GCC_GP3_CLK 50
+#define GCC_LPASS_CORE_AXIM_CLK 51
+#define GCC_LPASS_SWAY_CLK 52
+#define GCC_MDIO0_AHB_CLK 53
+#define GCC_MDIO1_AHB_CLK 54
+#define GCC_PCIE0_AHB_CLK 55
+#define GCC_PCIE0_AUX_CLK 56
+#define GCC_PCIE0_AXI_M_CLK 57
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 58
+#define GCC_PCIE0_AXI_S_CLK 59
+#define GCC_PCIE0_PIPE_CLK 60
+#define GCC_PCIE1_AHB_CLK 61
+#define GCC_PCIE1_AUX_CLK 62
+#define GCC_PCIE1_AXI_M_CLK 63
+#define GCC_PCIE1_AXI_S_BRIDGE_CLK 64
+#define GCC_PCIE1_AXI_S_CLK 65
+#define GCC_PCIE1_PIPE_CLK 66
+#define GCC_PRNG_AHB_CLK 67
+#define GCC_Q6_AXIM_CLK 68
+#define GCC_Q6_AXIM2_CLK 69
+#define GCC_Q6_AXIS_CLK 70
+#define GCC_Q6_AHB_CLK 71
+#define GCC_Q6_AHB_S_CLK 72
+#define GCC_Q6_TSCTR_1TO2_CLK 73
+#define GCC_Q6SS_ATBM_CLK 74
+#define GCC_Q6SS_PCLKDBG_CLK 75
+#define GCC_Q6SS_TRIG_CLK 76
+#define GCC_QDSS_AT_CLK 77
+#define GCC_QDSS_CFG_AHB_CLK 78
+#define GCC_QDSS_DAP_AHB_CLK 79
+#define GCC_QDSS_DAP_CLK 80
+#define GCC_QDSS_ETR_USB_CLK 81
+#define GCC_QDSS_EUD_AT_CLK 82
+#define GCC_QDSS_STM_CLK 83
+#define GCC_QDSS_TRACECLKIN_CLK 84
+#define GCC_QDSS_TSCTR_DIV8_CLK 85
+#define GCC_QPIC_AHB_CLK 86
+#define GCC_QPIC_CLK 87
+#define GCC_QPIC_IO_MACRO_CLK 88
+#define GCC_SDCC1_AHB_CLK 89
+#define GCC_SDCC1_APPS_CLK 90
+#define GCC_SLEEP_CLK_SRC 91
+#define GCC_SNOC_GMAC0_AHB_CLK 92
+#define GCC_SNOC_GMAC0_AXI_CLK 93
+#define GCC_SNOC_GMAC1_AHB_CLK 94
+#define GCC_SNOC_GMAC1_AXI_CLK 95
+#define GCC_SNOC_LPASS_AXIM_CLK 96
+#define GCC_SNOC_LPASS_SWAY_CLK 97
+#define GCC_SNOC_UBI0_AXI_CLK 98
+#define GCC_SYS_NOC_PCIE0_AXI_CLK 99
+#define GCC_SYS_NOC_PCIE1_AXI_CLK 100
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 101
+#define GCC_SYS_NOC_USB0_AXI_CLK 102
+#define GCC_SYS_NOC_WCSS_AHB_CLK 103
+#define GCC_UBI0_AXI_CLK 104
+#define GCC_UBI0_CFG_CLK 105
+#define GCC_UBI0_CORE_CLK 106
+#define GCC_UBI0_DBG_CLK 107
+#define GCC_UBI0_NC_AXI_CLK 108
+#define GCC_UBI0_UTCM_CLK 109
+#define GCC_UNIPHY_AHB_CLK 110
+#define GCC_UNIPHY_RX_CLK 111
+#define GCC_UNIPHY_SYS_CLK 112
+#define GCC_UNIPHY_TX_CLK 113
+#define GCC_USB0_AUX_CLK 114
+#define GCC_USB0_EUD_AT_CLK 115
+#define GCC_USB0_LFPS_CLK 116
+#define GCC_USB0_MASTER_CLK 117
+#define GCC_USB0_MOCK_UTMI_CLK 118
+#define GCC_USB0_PHY_CFG_AHB_CLK 119
+#define GCC_USB0_SLEEP_CLK 120
+#define GCC_WCSS_ACMT_CLK 121
+#define GCC_WCSS_AHB_S_CLK 122
+#define GCC_WCSS_AXI_M_CLK 123
+#define GCC_WCSS_AXI_S_CLK 124
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 125
+#define GCC_WCSS_DBG_IFC_APB_CLK 126
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 127
+#define GCC_WCSS_DBG_IFC_ATB_CLK 128
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK 129
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 130
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 131
+#define GCC_WCSS_DBG_IFC_NTS_CLK 132
+#define GCC_WCSS_ECAHB_CLK 133
+#define GCC_XO_CLK 134
+#define GCC_XO_CLK_SRC 135
+#define GMAC0_RX_CLK_SRC 136
+#define GMAC0_TX_CLK_SRC 137
+#define GMAC1_RX_CLK_SRC 138
+#define GMAC1_TX_CLK_SRC 139
+#define GMAC_CLK_SRC 140
+#define GP1_CLK_SRC 141
+#define GP2_CLK_SRC 142
+#define GP3_CLK_SRC 143
+#define LPASS_AXIM_CLK_SRC 144
+#define LPASS_SWAY_CLK_SRC 145
+#define PCIE0_AUX_CLK_SRC 146
+#define PCIE0_AXI_CLK_SRC 147
+#define PCIE1_AUX_CLK_SRC 148
+#define PCIE1_AXI_CLK_SRC 149
+#define PCNOC_BFDCD_CLK_SRC 150
+#define Q6_AXI_CLK_SRC 151
+#define QDSS_AT_CLK_SRC 152
+#define QDSS_STM_CLK_SRC 153
+#define QDSS_TSCTR_CLK_SRC 154
+#define QDSS_TRACECLKIN_CLK_SRC 155
+#define QPIC_IO_MACRO_CLK_SRC 156
+#define SDCC1_APPS_CLK_SRC 157
+#define SYSTEM_NOC_BFDCD_CLK_SRC 158
+#define UBI0_AXI_CLK_SRC 159
+#define UBI0_CORE_CLK_SRC 160
+#define USB0_AUX_CLK_SRC 161
+#define USB0_LFPS_CLK_SRC 162
+#define USB0_MASTER_CLK_SRC 163
+#define USB0_MOCK_UTMI_CLK_SRC 164
+#define WCSS_AHB_CLK_SRC 165
+#define PCIE0_PIPE_CLK_SRC 166
+#define PCIE1_PIPE_CLK_SRC 167
+#define USB0_PIPE_CLK_SRC 168
+#define GCC_USB0_PIPE_CLK 169
+#define GMAC0_RX_DIV_CLK_SRC 170
+#define GMAC0_TX_DIV_CLK_SRC 171
+#define GMAC1_RX_DIV_CLK_SRC 172
+#define GMAC1_TX_DIV_CLK_SRC 173
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8917.h b/include/dt-bindings/clock/qcom,gcc-msm8917.h
index a371b1adc896..4b421e7414b5 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8917.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8917.h
@@ -169,6 +169,7 @@
#define VFE0_CLK_SRC 162
#define VFE1_CLK_SRC 163
#define VSYNC_CLK_SRC 164
+#define GPLL0_SLEEP_CLK_SRC 165
/* GCC block resets */
#define GCC_CAMSS_MICRO_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index 1badb4f9c58f..b5456a64d421 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -190,6 +190,9 @@
#define AGGRE2_SNOC_NORTH_AXI 181
#define SSC_XO 182
#define SSC_CNOC_AHBS_CLK 183
+#define GCC_MMSS_GPLL0_DIV_CLK 184
+#define GCC_GPU_GPLL0_DIV_CLK 185
+#define GCC_GPU_GPLL0_CLK 186
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
diff --git a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
index 721105ea4fad..845491591784 100644
--- a/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
+++ b/include/dt-bindings/clock/qcom,gcc-sc8280xp.h
@@ -494,5 +494,15 @@
#define USB30_SEC_GDSC 11
#define EMAC_0_GDSC 12
#define EMAC_1_GDSC 13
+#define USB4_1_GDSC 14
+#define USB4_GDSC 15
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 16
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF1_GDSC 17
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF0_GDSC 18
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF1_GDSC 19
+#define HLOS1_VOTE_TURING_MMU_TBU0_GDSC 20
+#define HLOS1_VOTE_TURING_MMU_TBU1_GDSC 21
+#define HLOS1_VOTE_TURING_MMU_TBU2_GDSC 22
+#define HLOS1_VOTE_TURING_MMU_TBU3_GDSC 23
#endif
diff --git a/include/dt-bindings/clock/qcom,ipq9574-gcc.h b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
index b32a7aa65349..08fd3a37acaa 100644
--- a/include/dt-bindings/clock/qcom,ipq9574-gcc.h
+++ b/include/dt-bindings/clock/qcom,ipq9574-gcc.h
@@ -214,4 +214,6 @@
#define GCC_CRYPTO_CLK 205
#define GCC_CRYPTO_AXI_CLK 206
#define GCC_CRYPTO_AHB_CLK 207
+#define GCC_USB0_PIPE_CLK 208
+#define GCC_USB0_SLEEP_CLK 209
#endif
diff --git a/include/dt-bindings/clock/qcom,lcc-mdm9615.h b/include/dt-bindings/clock/qcom,lcc-mdm9615.h
deleted file mode 100644
index 299338ee1d88..000000000000
--- a/include/dt-bindings/clock/qcom,lcc-mdm9615.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
- * Copyright (c) BayLibre, SAS.
- * Author : Neil Armstrong <narmstrong@baylibre.com>
- */
-
-#ifndef _DT_BINDINGS_CLK_LCC_MDM9615_H
-#define _DT_BINDINGS_CLK_LCC_MDM9615_H
-
-#define PLL4 0
-#define MI2S_OSR_SRC 1
-#define MI2S_OSR_CLK 2
-#define MI2S_DIV_CLK 3
-#define MI2S_BIT_DIV_CLK 4
-#define MI2S_BIT_CLK 5
-#define PCM_SRC 6
-#define PCM_CLK_OUT 7
-#define PCM_CLK 8
-#define SLIMBUS_SRC 9
-#define AUDIO_SLIMBUS_CLK 10
-#define SPS_SLIMBUS_CLK 11
-#define CODEC_I2S_MIC_OSR_SRC 12
-#define CODEC_I2S_MIC_OSR_CLK 13
-#define CODEC_I2S_MIC_DIV_CLK 14
-#define CODEC_I2S_MIC_BIT_DIV_CLK 15
-#define CODEC_I2S_MIC_BIT_CLK 16
-#define SPARE_I2S_MIC_OSR_SRC 17
-#define SPARE_I2S_MIC_OSR_CLK 18
-#define SPARE_I2S_MIC_DIV_CLK 19
-#define SPARE_I2S_MIC_BIT_DIV_CLK 20
-#define SPARE_I2S_MIC_BIT_CLK 21
-#define CODEC_I2S_SPKR_OSR_SRC 22
-#define CODEC_I2S_SPKR_OSR_CLK 23
-#define CODEC_I2S_SPKR_DIV_CLK 24
-#define CODEC_I2S_SPKR_BIT_DIV_CLK 25
-#define CODEC_I2S_SPKR_BIT_CLK 26
-#define SPARE_I2S_SPKR_OSR_SRC 27
-#define SPARE_I2S_SPKR_OSR_CLK 28
-#define SPARE_I2S_SPKR_DIV_CLK 29
-#define SPARE_I2S_SPKR_BIT_DIV_CLK 30
-#define SPARE_I2S_SPKR_BIT_CLK 31
-
-#endif
diff --git a/include/dt-bindings/clock/qcom,qdu1000-gcc.h b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
index ddbc6b825e80..2fd36cbfddbb 100644
--- a/include/dt-bindings/clock/qcom,qdu1000-gcc.h
+++ b/include/dt-bindings/clock/qcom,qdu1000-gcc.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
/*
- * Copyright (c) 2021-2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2023, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DT_BINDINGS_CLK_QCOM_GCC_QDU1000_H
@@ -138,6 +138,8 @@
#define GCC_AGGRE_NOC_ECPRI_GSI_CLK 128
#define GCC_PCIE_0_PIPE_CLK_SRC 129
#define GCC_PCIE_0_PHY_AUX_CLK_SRC 130
+#define GCC_GPLL1_OUT_EVEN 131
+#define GCC_DDRSS_ECPRI_GSI_CLK 132
/* GCC resets */
#define GCC_ECPRI_CC_BCR 0
diff --git a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
index f2ae1c6a82dd..c34be5624954 100644
--- a/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
+++ b/include/dt-bindings/clock/r8a779f0-cpg-mssr.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
diff --git a/include/dt-bindings/clock/rockchip,rk3588-cru.h b/include/dt-bindings/clock/rockchip,rk3588-cru.h
index b5616bca7b44..5790b1391201 100644
--- a/include/dt-bindings/clock/rockchip,rk3588-cru.h
+++ b/include/dt-bindings/clock/rockchip,rk3588-cru.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2021 Rockchip Electronics Co. Ltd.
* Copyright (c) 2022 Collabora Ltd.
diff --git a/include/dt-bindings/clock/samsung,exynosautov9.h b/include/dt-bindings/clock/samsung,exynosautov9.h
index 42133af6d6b9..3065375c2d8b 100644
--- a/include/dt-bindings/clock/samsung,exynosautov9.h
+++ b/include/dt-bindings/clock/samsung,exynosautov9.h
@@ -166,16 +166,12 @@
#define GOUT_CLKCMU_PERIC1_IP 248
#define GOUT_CLKCMU_PERIS_BUS 249
-#define TOP_NR_CLK 250
-
/* CMU_BUSMC */
#define CLK_MOUT_BUSMC_BUS_USER 1
#define CLK_DOUT_BUSMC_BUSP 2
#define CLK_GOUT_BUSMC_PDMA0_PCLK 3
#define CLK_GOUT_BUSMC_SPDMA_PCLK 4
-#define BUSMC_NR_CLK 5
-
/* CMU_CORE */
#define CLK_MOUT_CORE_BUS_USER 1
#define CLK_DOUT_CORE_BUSP 2
@@ -183,8 +179,6 @@
#define CLK_GOUT_CORE_CCI_PCLK 4
#define CLK_GOUT_CORE_CMU_CORE_PCLK 5
-#define CORE_NR_CLK 6
-
/* CMU_FSYS0 */
#define CLK_MOUT_FSYS0_BUS_USER 1
#define CLK_MOUT_FSYS0_PCIE_USER 2
@@ -226,8 +220,6 @@
#define CLK_GOUT_FSYS0_PCIE_GEN3A_4L_CLK 35
#define CLK_GOUT_FSYS0_PCIE_GEN3B_4L_CLK 36
-#define FSYS0_NR_CLK 37
-
/* CMU_FSYS1 */
#define FOUT_MMC_PLL 1
@@ -251,8 +243,6 @@
#define CLK_GOUT_FSYS1_USB30_0_ACLK 17
#define CLK_GOUT_FSYS1_USB30_1_ACLK 18
-#define FSYS1_NR_CLK 19
-
/* CMU_FSYS2 */
#define CLK_MOUT_FSYS2_BUS_USER 1
#define CLK_MOUT_FSYS2_UFS_EMBD_USER 2
@@ -262,8 +252,6 @@
#define CLK_GOUT_FSYS2_UFS_EMBD1_ACLK 6
#define CLK_GOUT_FSYS2_UFS_EMBD1_UNIPRO 7
-#define FSYS2_NR_CLK 8
-
/* CMU_PERIC0 */
#define CLK_MOUT_PERIC0_BUS_USER 1
#define CLK_MOUT_PERIC0_IP_USER 2
@@ -308,8 +296,6 @@
#define CLK_GOUT_PERIC0_PCLK_10 42
#define CLK_GOUT_PERIC0_PCLK_11 43
-#define PERIC0_NR_CLK 44
-
/* CMU_PERIC1 */
#define CLK_MOUT_PERIC1_BUS_USER 1
#define CLK_MOUT_PERIC1_IP_USER 2
@@ -354,14 +340,10 @@
#define CLK_GOUT_PERIC1_PCLK_10 42
#define CLK_GOUT_PERIC1_PCLK_11 43
-#define PERIC1_NR_CLK 44
-
/* CMU_PERIS */
#define CLK_MOUT_PERIS_BUS_USER 1
#define CLK_GOUT_SYSREG_PERIS_PCLK 2
#define CLK_GOUT_WDT_CLUSTER0 3
#define CLK_GOUT_WDT_CLUSTER1 4
-#define PERIS_NR_CLK 5
-
#endif /* _DT_BINDINGS_CLOCK_EXYNOSAUTOV9_H */
diff --git a/include/dt-bindings/clock/starfive,jh7110-crg.h b/include/dt-bindings/clock/starfive,jh7110-crg.h
index 06257bfd9ac1..467ccab3bfaa 100644
--- a/include/dt-bindings/clock/starfive,jh7110-crg.h
+++ b/include/dt-bindings/clock/starfive,jh7110-crg.h
@@ -1,11 +1,18 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright 2022 StarFive Technology Co., Ltd.
*/
#ifndef __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
#define __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__
+/* PLL clocks */
+#define JH7110_PLLCLK_PLL0_OUT 0
+#define JH7110_PLLCLK_PLL1_OUT 1
+#define JH7110_PLLCLK_PLL2_OUT 2
+#define JH7110_PLLCLK_END 3
+
/* SYSCRG clocks */
#define JH7110_SYSCLK_CPU_ROOT 0
#define JH7110_SYSCLK_CPU_CORE 1
@@ -218,4 +225,77 @@
#define JH7110_AONCLK_END 14
+/* STGCRG clocks */
+#define JH7110_STGCLK_HIFI4_CLK_CORE 0
+#define JH7110_STGCLK_USB0_APB 1
+#define JH7110_STGCLK_USB0_UTMI_APB 2
+#define JH7110_STGCLK_USB0_AXI 3
+#define JH7110_STGCLK_USB0_LPM 4
+#define JH7110_STGCLK_USB0_STB 5
+#define JH7110_STGCLK_USB0_APP_125 6
+#define JH7110_STGCLK_USB0_REFCLK 7
+#define JH7110_STGCLK_PCIE0_AXI_MST0 8
+#define JH7110_STGCLK_PCIE0_APB 9
+#define JH7110_STGCLK_PCIE0_TL 10
+#define JH7110_STGCLK_PCIE1_AXI_MST0 11
+#define JH7110_STGCLK_PCIE1_APB 12
+#define JH7110_STGCLK_PCIE1_TL 13
+#define JH7110_STGCLK_PCIE_SLV_MAIN 14
+#define JH7110_STGCLK_SEC_AHB 15
+#define JH7110_STGCLK_SEC_MISC_AHB 16
+#define JH7110_STGCLK_GRP0_MAIN 17
+#define JH7110_STGCLK_GRP0_BUS 18
+#define JH7110_STGCLK_GRP0_STG 19
+#define JH7110_STGCLK_GRP1_MAIN 20
+#define JH7110_STGCLK_GRP1_BUS 21
+#define JH7110_STGCLK_GRP1_STG 22
+#define JH7110_STGCLK_GRP1_HIFI 23
+#define JH7110_STGCLK_E2_RTC 24
+#define JH7110_STGCLK_E2_CORE 25
+#define JH7110_STGCLK_E2_DBG 26
+#define JH7110_STGCLK_DMA1P_AXI 27
+#define JH7110_STGCLK_DMA1P_AHB 28
+
+#define JH7110_STGCLK_END 29
+
+/* ISPCRG clocks */
+#define JH7110_ISPCLK_DOM4_APB_FUNC 0
+#define JH7110_ISPCLK_MIPI_RX0_PXL 1
+#define JH7110_ISPCLK_DVP_INV 2
+#define JH7110_ISPCLK_M31DPHY_CFG_IN 3
+#define JH7110_ISPCLK_M31DPHY_REF_IN 4
+#define JH7110_ISPCLK_M31DPHY_TX_ESC_LAN0 5
+#define JH7110_ISPCLK_VIN_APB 6
+#define JH7110_ISPCLK_VIN_SYS 7
+#define JH7110_ISPCLK_VIN_PIXEL_IF0 8
+#define JH7110_ISPCLK_VIN_PIXEL_IF1 9
+#define JH7110_ISPCLK_VIN_PIXEL_IF2 10
+#define JH7110_ISPCLK_VIN_PIXEL_IF3 11
+#define JH7110_ISPCLK_VIN_P_AXI_WR 12
+#define JH7110_ISPCLK_ISPV2_TOP_WRAPPER_C 13
+
+#define JH7110_ISPCLK_END 14
+
+/* VOUTCRG clocks */
+#define JH7110_VOUTCLK_APB 0
+#define JH7110_VOUTCLK_DC8200_PIX 1
+#define JH7110_VOUTCLK_DSI_SYS 2
+#define JH7110_VOUTCLK_TX_ESC 3
+#define JH7110_VOUTCLK_DC8200_AXI 4
+#define JH7110_VOUTCLK_DC8200_CORE 5
+#define JH7110_VOUTCLK_DC8200_AHB 6
+#define JH7110_VOUTCLK_DC8200_PIX0 7
+#define JH7110_VOUTCLK_DC8200_PIX1 8
+#define JH7110_VOUTCLK_DOM_VOUT_TOP_LCD 9
+#define JH7110_VOUTCLK_DSITX_APB 10
+#define JH7110_VOUTCLK_DSITX_SYS 11
+#define JH7110_VOUTCLK_DSITX_DPI 12
+#define JH7110_VOUTCLK_DSITX_TXESC 13
+#define JH7110_VOUTCLK_MIPITX_DPHY_TXESC 14
+#define JH7110_VOUTCLK_HDMI_TX_MCLK 15
+#define JH7110_VOUTCLK_HDMI_TX_BCLK 16
+#define JH7110_VOUTCLK_HDMI_TX_SYS 17
+
+#define JH7110_VOUTCLK_END 18
+
#endif /* __DT_BINDINGS_CLOCK_STARFIVE_JH7110_CRG_H__ */
diff --git a/include/dt-bindings/clock/stm32mp1-clks.h b/include/dt-bindings/clock/stm32mp1-clks.h
index 25e8cfd43459..0a5324bcdbda 100644
--- a/include/dt-bindings/clock/stm32mp1-clks.h
+++ b/include/dt-bindings/clock/stm32mp1-clks.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
diff --git a/include/dt-bindings/clock/sun20i-d1-ccu.h b/include/dt-bindings/clock/sun20i-d1-ccu.h
index e143b9929763..fdbfb404f92a 100644
--- a/include/dt-bindings/clock/sun20i-d1-ccu.h
+++ b/include/dt-bindings/clock/sun20i-d1-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 huangzhenwei@allwinnertech.com
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
diff --git a/include/dt-bindings/clock/sun20i-d1-r-ccu.h b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
index 4c2697fd32b0..f95c170711e5 100644
--- a/include/dt-bindings/clock/sun20i-d1-r-ccu.h
+++ b/include/dt-bindings/clock/sun20i-d1-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
*/
diff --git a/include/dt-bindings/clock/sun50i-a100-ccu.h b/include/dt-bindings/clock/sun50i-a100-ccu.h
index 28dc36e1a232..06a2031d466b 100644
--- a/include/dt-bindings/clock/sun50i-a100-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a100-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/clock/sun50i-h6-ccu.h b/include/dt-bindings/clock/sun50i-h6-ccu.h
index a1545cd60e75..ef9123d81937 100644
--- a/include/dt-bindings/clock/sun50i-h6-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/clock/sun50i-h616-ccu.h b/include/dt-bindings/clock/sun50i-h616-ccu.h
index 1191aca53ac6..6f8f01e67628 100644
--- a/include/dt-bindings/clock/sun50i-h616-ccu.h
+++ b/include/dt-bindings/clock/sun50i-h616-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 Arm Ltd.
*/
diff --git a/include/dt-bindings/clock/sun6i-rtc.h b/include/dt-bindings/clock/sun6i-rtc.h
index c845493e4d37..3bd3aa3d57ce 100644
--- a/include/dt-bindings/clock/sun6i-rtc.h
+++ b/include/dt-bindings/clock/sun6i-rtc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
#ifndef _DT_BINDINGS_CLK_SUN6I_RTC_H_
#define _DT_BINDINGS_CLK_SUN6I_RTC_H_
diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h
index fbc1a3db2ea7..8249a2b47b79 100644
--- a/include/dt-bindings/display/sdtv-standards.h
+++ b/include/dt-bindings/display/sdtv-standards.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0-only or X11 */
+/* SPDX-License-Identifier: GPL-2.0-only OR X11 */
/*
* Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
*/
diff --git a/include/dt-bindings/firmware/qcom,scm.h b/include/dt-bindings/firmware/qcom,scm.h
index d1dc09e72923..6de8b08e1e79 100644
--- a/include/dt-bindings/firmware/qcom,scm.h
+++ b/include/dt-bindings/firmware/qcom,scm.h
@@ -2,17 +2,38 @@
/*
* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
+ * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
*/
#ifndef _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
#define _DT_BINDINGS_FIRMWARE_QCOM_SCM_H
+#define QCOM_SCM_VMID_TZ 0x1
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_SSC_Q6 0x5
#define QCOM_SCM_VMID_ADSP_Q6 0x6
+#define QCOM_SCM_VMID_CP_TOUCH 0x8
+#define QCOM_SCM_VMID_CP_BITSTREAM 0x9
+#define QCOM_SCM_VMID_CP_PIXEL 0xA
+#define QCOM_SCM_VMID_CP_NON_PIXEL 0xB
+#define QCOM_SCM_VMID_CP_CAMERA 0xD
+#define QCOM_SCM_VMID_HLOS_FREE 0xE
#define QCOM_SCM_VMID_MSS_MSA 0xF
+#define QCOM_SCM_VMID_MSS_NONMSA 0x10
+#define QCOM_SCM_VMID_CP_SEC_DISPLAY 0x11
+#define QCOM_SCM_VMID_CP_APP 0x12
+#define QCOM_SCM_VMID_LPASS 0x16
#define QCOM_SCM_VMID_WLAN 0x18
#define QCOM_SCM_VMID_WLAN_CE 0x19
+#define QCOM_SCM_VMID_CP_SPSS_SP 0x1A
+#define QCOM_SCM_VMID_CP_CAMERA_PREVIEW 0x1D
+#define QCOM_SCM_VMID_CDSP 0x1E
+#define QCOM_SCM_VMID_CP_SPSS_SP_SHARED 0x22
+#define QCOM_SCM_VMID_CP_SPSS_HLOS_SHARED 0x24
+#define QCOM_SCM_VMID_ADSP_HEAP 0x25
+#define QCOM_SCM_VMID_CP_CDSP 0x2A
#define QCOM_SCM_VMID_NAV 0x2B
+#define QCOM_SCM_VMID_TVM 0x2D
+#define QCOM_SCM_VMID_OEMVM 0x31
#endif
diff --git a/include/dt-bindings/gpio/amlogic-c3-gpio.h b/include/dt-bindings/gpio/amlogic-c3-gpio.h
new file mode 100644
index 000000000000..75c8da6f505f
--- /dev/null
+++ b/include/dt-bindings/gpio/amlogic-c3-gpio.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2021 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+#define _DT_BINDINGS_AMLOGIC_C3_GPIO_H
+
+#define GPIOE_0 0
+#define GPIOE_1 1
+#define GPIOE_2 2
+#define GPIOE_3 3
+#define GPIOE_4 4
+
+#define GPIOB_0 5
+#define GPIOB_1 6
+#define GPIOB_2 7
+#define GPIOB_3 8
+#define GPIOB_4 9
+#define GPIOB_5 10
+#define GPIOB_6 11
+#define GPIOB_7 12
+#define GPIOB_8 13
+#define GPIOB_9 14
+#define GPIOB_10 15
+#define GPIOB_11 16
+#define GPIOB_12 17
+#define GPIOB_13 18
+#define GPIOB_14 19
+
+#define GPIOC_0 20
+#define GPIOC_1 21
+#define GPIOC_2 22
+#define GPIOC_3 23
+#define GPIOC_4 24
+#define GPIOC_5 25
+#define GPIOC_6 26
+
+#define GPIOX_0 27
+#define GPIOX_1 28
+#define GPIOX_2 29
+#define GPIOX_3 30
+#define GPIOX_4 31
+#define GPIOX_5 32
+#define GPIOX_6 33
+#define GPIOX_7 34
+#define GPIOX_8 35
+#define GPIOX_9 36
+#define GPIOX_10 37
+#define GPIOX_11 38
+#define GPIOX_12 39
+#define GPIOX_13 40
+
+#define GPIOD_0 41
+#define GPIOD_1 42
+#define GPIOD_2 43
+#define GPIOD_3 44
+#define GPIOD_4 45
+#define GPIOD_5 46
+#define GPIOD_6 47
+
+#define GPIOA_0 48
+#define GPIOA_1 49
+#define GPIOA_2 50
+#define GPIOA_3 51
+#define GPIOA_4 52
+#define GPIOA_5 53
+
+#define GPIO_TEST_N 54
+
+#endif /* _DT_BINDINGS_AMLOGIC_C3_GPIO_H */
diff --git a/include/dt-bindings/gpio/meson-g12a-gpio.h b/include/dt-bindings/gpio/meson-g12a-gpio.h
index f7bd69350d18..fa7bb0bbf010 100644
--- a/include/dt-bindings/gpio/meson-g12a-gpio.h
+++ b/include/dt-bindings/gpio/meson-g12a-gpio.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2018 Amlogic, Inc. All rights reserved.
* Author: Xingyu Chen <xingyu.chen@amlogic.com>
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
index 09fd169ad18e..5d98f7d48a1e 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350.h
@@ -6,58 +6,60 @@
#ifndef _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
#define _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350_ADC for PMIC7 */
-#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | 0x0)
-#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | 0x01)
-#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | 0x02)
-#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | 0x03)
-
-#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | 0x04)
-#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | 0x05)
-#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | 0x06)
-#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | 0x07)
-#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | 0x08)
-#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | 0x0a)
-#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | 0x0b)
-#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | 0x0c)
-#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | 0x0d)
+#define PM8350_ADC7_REF_GND(sid) ((sid) << 8 | ADC7_REF_GND)
+#define PM8350_ADC7_1P25VREF(sid) ((sid) << 8 | ADC7_1P25VREF)
+#define PM8350_ADC7_VREF_VADC(sid) ((sid) << 8 | ADC7_VREF_VADC)
+#define PM8350_ADC7_DIE_TEMP(sid) ((sid) << 8 | ADC7_DIE_TEMP)
+
+#define PM8350_ADC7_AMUX_THM1(sid) ((sid) << 8 | ADC7_AMUX_THM1)
+#define PM8350_ADC7_AMUX_THM2(sid) ((sid) << 8 | ADC7_AMUX_THM2)
+#define PM8350_ADC7_AMUX_THM3(sid) ((sid) << 8 | ADC7_AMUX_THM3)
+#define PM8350_ADC7_AMUX_THM4(sid) ((sid) << 8 | ADC7_AMUX_THM4)
+#define PM8350_ADC7_AMUX_THM5(sid) ((sid) << 8 | ADC7_AMUX_THM5)
+#define PM8350_ADC7_GPIO1(sid) ((sid) << 8 | ADC7_GPIO1)
+#define PM8350_ADC7_GPIO2(sid) ((sid) << 8 | ADC7_GPIO2)
+#define PM8350_ADC7_GPIO3(sid) ((sid) << 8 | ADC7_GPIO3)
+#define PM8350_ADC7_GPIO4(sid) ((sid) << 8 | ADC7_GPIO4)
/* 30k pull-up1 */
-#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | 0x24)
-#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | 0x25)
-#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | 0x26)
-#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | 0x27)
-#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | 0x28)
-#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | 0x2a)
-#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | 0x2b)
-#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | 0x2c)
-#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | 0x2d)
+#define PM8350_ADC7_AMUX_THM1_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350_ADC7_AMUX_THM2_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350_ADC7_AMUX_THM3_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350_ADC7_AMUX_THM4_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350_ADC7_AMUX_THM5_30K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350_ADC7_GPIO1_30K_PU(sid) ((sid) << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350_ADC7_GPIO2_30K_PU(sid) ((sid) << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350_ADC7_GPIO3_30K_PU(sid) ((sid) << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350_ADC7_GPIO4_30K_PU(sid) ((sid) << 8 | ADC7_GPIO4_30K_PU)
/* 100k pull-up2 */
-#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | 0x44)
-#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | 0x45)
-#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | 0x46)
-#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | 0x47)
-#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | 0x48)
-#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | 0x4a)
-#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | 0x4b)
-#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | 0x4c)
-#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | 0x4d)
+#define PM8350_ADC7_AMUX_THM1_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350_ADC7_AMUX_THM2_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350_ADC7_AMUX_THM3_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350_ADC7_AMUX_THM4_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350_ADC7_AMUX_THM5_100K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350_ADC7_GPIO1_100K_PU(sid) ((sid) << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350_ADC7_GPIO2_100K_PU(sid) ((sid) << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350_ADC7_GPIO3_100K_PU(sid) ((sid) << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350_ADC7_GPIO4_100K_PU(sid) ((sid) << 8 | ADC7_GPIO4_100K_PU)
/* 400k pull-up3 */
-#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | 0x64)
-#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | 0x65)
-#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | 0x66)
-#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | 0x67)
-#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | 0x68)
-#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | 0x6a)
-#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | 0x6b)
-#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | 0x6c)
-#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | 0x6d)
+#define PM8350_ADC7_AMUX_THM1_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350_ADC7_AMUX_THM2_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350_ADC7_AMUX_THM3_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350_ADC7_AMUX_THM4_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350_ADC7_AMUX_THM5_400K_PU(sid) ((sid) << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350_ADC7_GPIO1_400K_PU(sid) ((sid) << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350_ADC7_GPIO2_400K_PU(sid) ((sid) << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350_ADC7_GPIO3_400K_PU(sid) ((sid) << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350_ADC7_GPIO4_400K_PU(sid) ((sid) << 8 | ADC7_GPIO4_400K_PU)
/* 1/3 Divider */
-#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | 0x8d)
+#define PM8350_ADC7_GPIO4_DIV3(sid) ((sid) << 8 | ADC7_GPIO4_DIV3)
-#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | 0x8e)
+#define PM8350_ADC7_VPH_PWR(sid) ((sid) << 8 | ADC7_VPH_PWR)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
index dc2497c27e16..57c7977666d3 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pm8350b.h
@@ -10,79 +10,81 @@
#define PM8350B_SID 3
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PM8350B_ADC for PMIC7 */
-#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | 0x0)
-#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | 0x01)
-#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | 0x02)
-#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | 0x03)
+#define PM8350B_ADC7_REF_GND (PM8350B_SID << 8 | ADC7_REF_GND)
+#define PM8350B_ADC7_1P25VREF (PM8350B_SID << 8 | ADC7_1P25VREF)
+#define PM8350B_ADC7_VREF_VADC (PM8350B_SID << 8 | ADC7_VREF_VADC)
+#define PM8350B_ADC7_DIE_TEMP (PM8350B_SID << 8 | ADC7_DIE_TEMP)
-#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | 0x04)
-#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | 0x05)
-#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | 0x06)
-#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | 0x07)
-#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | 0x08)
-#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | 0x09)
-#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | 0x0a)
-#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | 0x0b)
-#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | 0x0c)
-#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | 0x0d)
+#define PM8350B_ADC7_AMUX_THM1 (PM8350B_SID << 8 | ADC7_AMUX_THM1)
+#define PM8350B_ADC7_AMUX_THM2 (PM8350B_SID << 8 | ADC7_AMUX_THM2)
+#define PM8350B_ADC7_AMUX_THM3 (PM8350B_SID << 8 | ADC7_AMUX_THM3)
+#define PM8350B_ADC7_AMUX_THM4 (PM8350B_SID << 8 | ADC7_AMUX_THM4)
+#define PM8350B_ADC7_AMUX_THM5 (PM8350B_SID << 8 | ADC7_AMUX_THM5)
+#define PM8350B_ADC7_AMUX_THM6 (PM8350B_SID << 8 | ADC7_AMUX_THM6)
+#define PM8350B_ADC7_GPIO1 (PM8350B_SID << 8 | ADC7_GPIO1)
+#define PM8350B_ADC7_GPIO2 (PM8350B_SID << 8 | ADC7_GPIO2)
+#define PM8350B_ADC7_GPIO3 (PM8350B_SID << 8 | ADC7_GPIO3)
+#define PM8350B_ADC7_GPIO4 (PM8350B_SID << 8 | ADC7_GPIO4)
-#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | 0x10)
-#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | 0x11)
-#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | 0x12)
-#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | 0x13)
-#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | 0x15)
-#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | 0x17)
+#define PM8350B_ADC7_CHG_TEMP (PM8350B_SID << 8 | ADC7_CHG_TEMP)
+#define PM8350B_ADC7_USB_IN_V_16 (PM8350B_SID << 8 | ADC7_USB_IN_V_16)
+#define PM8350B_ADC7_VDC_16 (PM8350B_SID << 8 | ADC7_VDC_16)
+#define PM8350B_ADC7_CC1_ID (PM8350B_SID << 8 | ADC7_CC1_ID)
+#define PM8350B_ADC7_VREF_BAT_THERM (PM8350B_SID << 8 | ADC7_VREF_BAT_THERM)
+#define PM8350B_ADC7_IIN_FB (PM8350B_SID << 8 | ADC7_IIN_FB)
/* 30k pull-up1 */
-#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | 0x24)
-#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | 0x25)
-#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | 0x26)
-#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | 0x27)
-#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | 0x28)
-#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | 0x29)
-#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | 0x2a)
-#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | 0x2b)
-#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | 0x2c)
-#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | 0x2d)
-#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | 0x33)
+#define PM8350B_ADC7_AMUX_THM1_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PM8350B_ADC7_AMUX_THM2_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PM8350B_ADC7_AMUX_THM3_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PM8350B_ADC7_AMUX_THM4_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PM8350B_ADC7_AMUX_THM5_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_30K_PU)
+#define PM8350B_ADC7_AMUX_THM6_30K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_30K_PU)
+#define PM8350B_ADC7_GPIO1_30K_PU (PM8350B_SID << 8 | ADC7_GPIO1_30K_PU)
+#define PM8350B_ADC7_GPIO2_30K_PU (PM8350B_SID << 8 | ADC7_GPIO2_30K_PU)
+#define PM8350B_ADC7_GPIO3_30K_PU (PM8350B_SID << 8 | ADC7_GPIO3_30K_PU)
+#define PM8350B_ADC7_GPIO4_30K_PU (PM8350B_SID << 8 | ADC7_GPIO4_30K_PU)
+#define PM8350B_ADC7_CC1_ID_30K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_30K_PU)
/* 100k pull-up2 */
-#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | 0x44)
-#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | 0x45)
-#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | 0x46)
-#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | 0x47)
-#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | 0x48)
-#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | 0x49)
-#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | 0x4a)
-#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | 0x4b)
-#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | 0x4c)
-#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | 0x4d)
-#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | 0x53)
+#define PM8350B_ADC7_AMUX_THM1_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PM8350B_ADC7_AMUX_THM2_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PM8350B_ADC7_AMUX_THM3_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PM8350B_ADC7_AMUX_THM4_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PM8350B_ADC7_AMUX_THM5_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_100K_PU)
+#define PM8350B_ADC7_AMUX_THM6_100K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_100K_PU)
+#define PM8350B_ADC7_GPIO1_100K_PU (PM8350B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PM8350B_ADC7_GPIO2_100K_PU (PM8350B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PM8350B_ADC7_GPIO3_100K_PU (PM8350B_SID << 8 | ADC7_GPIO3_100K_PU)
+#define PM8350B_ADC7_GPIO4_100K_PU (PM8350B_SID << 8 | ADC7_GPIO4_100K_PU)
+#define PM8350B_ADC7_CC1_ID_100K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_100K_PU)
/* 400k pull-up3 */
-#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | 0x64)
-#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | 0x65)
-#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | 0x66)
-#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | 0x67)
-#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | 0x68)
-#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | 0x69)
-#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | 0x6a)
-#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | 0x6b)
-#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | 0x6c)
-#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | 0x6d)
-#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | 0x73)
+#define PM8350B_ADC7_AMUX_THM1_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PM8350B_ADC7_AMUX_THM2_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PM8350B_ADC7_AMUX_THM3_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PM8350B_ADC7_AMUX_THM4_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PM8350B_ADC7_AMUX_THM5_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM5_400K_PU)
+#define PM8350B_ADC7_AMUX_THM6_400K_PU (PM8350B_SID << 8 | ADC7_AMUX_THM6_400K_PU)
+#define PM8350B_ADC7_GPIO1_400K_PU (PM8350B_SID << 8 | ADC7_GPIO1_400K_PU)
+#define PM8350B_ADC7_GPIO2_400K_PU (PM8350B_SID << 8 | ADC7_GPIO2_400K_PU)
+#define PM8350B_ADC7_GPIO3_400K_PU (PM8350B_SID << 8 | ADC7_GPIO3_400K_PU)
+#define PM8350B_ADC7_GPIO4_400K_PU (PM8350B_SID << 8 | ADC7_GPIO4_400K_PU)
+#define PM8350B_ADC7_CC1_ID_400K_PU (PM8350B_SID << 8 | ADC7_CC1_ID_400K_PU)
/* 1/3 Divider */
-#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | 0x8a)
-#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | 0x8b)
-#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | 0x8c)
-#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | 0x8d)
+#define PM8350B_ADC7_GPIO1_DIV3 (PM8350B_SID << 8 | ADC7_GPIO1_DIV3)
+#define PM8350B_ADC7_GPIO2_DIV3 (PM8350B_SID << 8 | ADC7_GPIO2_DIV3)
+#define PM8350B_ADC7_GPIO3_DIV3 (PM8350B_SID << 8 | ADC7_GPIO3_DIV3)
+#define PM8350B_ADC7_GPIO4_DIV3 (PM8350B_SID << 8 | ADC7_GPIO4_DIV3)
-#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | 0x8e)
-#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | 0x8f)
+#define PM8350B_ADC7_VPH_PWR (PM8350B_SID << 8 | ADC7_VPH_PWR)
+#define PM8350B_ADC7_VBAT_SNS (PM8350B_SID << 8 | ADC7_VBAT_SNS)
-#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | 0x94)
-#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | 0x96)
+#define PM8350B_ADC7_SBUx (PM8350B_SID << 8 | ADC7_SBU)
+#define PM8350B_ADC7_VBAT_2S_MID (PM8350B_SID << 8 | ADC7_VBAT_2S_MID)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PM8350B_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
index 6c296870e95b..3d1a41a22cef 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmk8350.h
@@ -10,37 +10,39 @@
#define PMK8350_SID 0
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMK8350_ADC for PMIC7 */
-#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | 0x0)
-#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | 0x01)
-#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | 0x02)
-#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | 0x03)
+#define PMK8350_ADC7_REF_GND (PMK8350_SID << 8 | ADC7_REF_GND)
+#define PMK8350_ADC7_1P25VREF (PMK8350_SID << 8 | ADC7_1P25VREF)
+#define PMK8350_ADC7_VREF_VADC (PMK8350_SID << 8 | ADC7_VREF_VADC)
+#define PMK8350_ADC7_DIE_TEMP (PMK8350_SID << 8 | ADC7_DIE_TEMP)
-#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | 0x04)
-#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | 0x05)
-#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | 0x06)
-#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | 0x07)
-#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | 0x08)
+#define PMK8350_ADC7_AMUX_THM1 (PMK8350_SID << 8 | ADC7_AMUX_THM1)
+#define PMK8350_ADC7_AMUX_THM2 (PMK8350_SID << 8 | ADC7_AMUX_THM2)
+#define PMK8350_ADC7_AMUX_THM3 (PMK8350_SID << 8 | ADC7_AMUX_THM3)
+#define PMK8350_ADC7_AMUX_THM4 (PMK8350_SID << 8 | ADC7_AMUX_THM4)
+#define PMK8350_ADC7_AMUX_THM5 (PMK8350_SID << 8 | ADC7_AMUX_THM5)
/* 30k pull-up1 */
-#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | 0x24)
-#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | 0x25)
-#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | 0x26)
-#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | 0x27)
-#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | 0x28)
+#define PMK8350_ADC7_AMUX_THM1_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_30K_PU)
+#define PMK8350_ADC7_AMUX_THM2_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_30K_PU)
+#define PMK8350_ADC7_AMUX_THM3_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_30K_PU)
+#define PMK8350_ADC7_AMUX_THM4_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_30K_PU)
+#define PMK8350_ADC7_AMUX_THM5_30K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_30K_PU)
/* 100k pull-up2 */
-#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | 0x44)
-#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | 0x45)
-#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | 0x46)
-#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | 0x47)
-#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | 0x48)
+#define PMK8350_ADC7_AMUX_THM1_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_100K_PU)
+#define PMK8350_ADC7_AMUX_THM2_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_100K_PU)
+#define PMK8350_ADC7_AMUX_THM3_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_100K_PU)
+#define PMK8350_ADC7_AMUX_THM4_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_100K_PU)
+#define PMK8350_ADC7_AMUX_THM5_100K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_100K_PU)
/* 400k pull-up3 */
-#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | 0x64)
-#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | 0x65)
-#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | 0x66)
-#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | 0x67)
-#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | 0x68)
+#define PMK8350_ADC7_AMUX_THM1_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM1_400K_PU)
+#define PMK8350_ADC7_AMUX_THM2_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM2_400K_PU)
+#define PMK8350_ADC7_AMUX_THM3_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM3_400K_PU)
+#define PMK8350_ADC7_AMUX_THM4_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM4_400K_PU)
+#define PMK8350_ADC7_AMUX_THM5_400K_PU (PMK8350_SID << 8 | ADC7_AMUX_THM5_400K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMK8350_H */
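With these composite IDs, each channel define carries the owning PMIC's SID in
bits 15:8 and the shared ADC7_* channel number from
<dt-bindings/iio/qcom,spmi-vadc.h> in bits 7:0. A minimal sketch of how a board
DTS might consume one of them, assuming the usual qcom,spmi-adc7 channel-node
layout and a PMK8350 at the default SID of 0; the &pmk8350_vadc label and the
property values are illustrative only, not part of this patch:

    &pmk8350_vadc {
            /* unit address 0x44 = PMK8350_SID << 8 | 0x44 */
            channel@44 {
                    reg = <PMK8350_ADC7_AMUX_THM1_100K_PU>;
                    qcom,ratiometric;
                    qcom,hw-settle-time = <200>;
                    label = "xo_therm";
            };
    };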
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
index d6df1b19e5ff..c5adfa82b20d 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735a.h
@@ -10,19 +10,21 @@
#define PMR735A_SID 4
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735A_ADC for PMIC7 */
-#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | 0x0)
-#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | 0x01)
-#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | 0x02)
-#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | 0x03)
+#define PMR735A_ADC7_REF_GND (PMR735A_SID << 8 | ADC7_REF_GND)
+#define PMR735A_ADC7_1P25VREF (PMR735A_SID << 8 | ADC7_1P25VREF)
+#define PMR735A_ADC7_VREF_VADC (PMR735A_SID << 8 | ADC7_VREF_VADC)
+#define PMR735A_ADC7_DIE_TEMP (PMR735A_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | 0x0a)
-#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | 0x0b)
-#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | 0x0c)
+#define PMR735A_ADC7_GPIO1 (PMR735A_SID << 8 | ADC7_GPIO1)
+#define PMR735A_ADC7_GPIO2 (PMR735A_SID << 8 | ADC7_GPIO2)
+#define PMR735A_ADC7_GPIO3 (PMR735A_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | 0x4a)
-#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | 0x4b)
-#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | 0x4c)
+#define PMR735A_ADC7_GPIO1_100K_PU (PMR735A_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735A_ADC7_GPIO2_100K_PU (PMR735A_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735A_ADC7_GPIO3_100K_PU (PMR735A_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735A_H */
diff --git a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
index 8da0e7dab315..fdb8dd9ae541 100644
--- a/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
+++ b/include/dt-bindings/iio/qcom,spmi-adc7-pmr735b.h
@@ -10,19 +10,21 @@
#define PMR735B_SID 5
#endif
+#include <dt-bindings/iio/qcom,spmi-vadc.h>
+
/* ADC channels for PMR735B_ADC for PMIC7 */
-#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | 0x0)
-#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | 0x01)
-#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | 0x02)
-#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | 0x03)
+#define PMR735B_ADC7_REF_GND (PMR735B_SID << 8 | ADC7_REF_GND)
+#define PMR735B_ADC7_1P25VREF (PMR735B_SID << 8 | ADC7_1P25VREF)
+#define PMR735B_ADC7_VREF_VADC (PMR735B_SID << 8 | ADC7_VREF_VADC)
+#define PMR735B_ADC7_DIE_TEMP (PMR735B_SID << 8 | ADC7_DIE_TEMP)
-#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | 0x0a)
-#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | 0x0b)
-#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | 0x0c)
+#define PMR735B_ADC7_GPIO1 (PMR735B_SID << 8 | ADC7_GPIO1)
+#define PMR735B_ADC7_GPIO2 (PMR735B_SID << 8 | ADC7_GPIO2)
+#define PMR735B_ADC7_GPIO3 (PMR735B_SID << 8 | ADC7_GPIO3)
/* 100k pull-up2 */
-#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | 0x4a)
-#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | 0x4b)
-#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | 0x4c)
+#define PMR735B_ADC7_GPIO1_100K_PU (PMR735B_SID << 8 | ADC7_GPIO1_100K_PU)
+#define PMR735B_ADC7_GPIO2_100K_PU (PMR735B_SID << 8 | ADC7_GPIO2_100K_PU)
+#define PMR735B_ADC7_GPIO3_100K_PU (PMR735B_SID << 8 | ADC7_GPIO3_100K_PU)
#endif /* _DT_BINDINGS_QCOM_SPMI_VADC_PMR735B_H */
diff --git a/include/dt-bindings/interconnect/qcom,rpm-icc.h b/include/dt-bindings/interconnect/qcom,rpm-icc.h
new file mode 100644
index 000000000000..2cd56f91e5c5
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,rpm-icc.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Linaro Limited
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_RPM_ICC_H
+
+#define RPM_ACTIVE_TAG (1 << 0)
+#define RPM_SLEEP_TAG (1 << 1)
+#define RPM_ALWAYS_TAG (RPM_ACTIVE_TAG | RPM_SLEEP_TAG)
+
+#endif
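These tag bits are intended for the second cell of an interconnect specifier on
RPM-based platforms whose providers use #interconnect-cells = <2> (drivers may
also apply them at runtime via icc_set_tag()). A hedged sketch; the provider
label and endpoint macros below are placeholders, not taken from this patch:

    interconnects = <&bimc MASTER_APPSS_PROC RPM_ACTIVE_TAG
                     &bimc SLAVE_EBI_CH0 RPM_ACTIVE_TAG>;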
diff --git a/include/dt-bindings/interconnect/qcom,sm8250.h b/include/dt-bindings/interconnect/qcom,sm8250.h
index a4af5cc19271..2a656c02df4b 100644
--- a/include/dt-bindings/interconnect/qcom,sm8250.h
+++ b/include/dt-bindings/interconnect/qcom,sm8250.h
@@ -166,4 +166,11 @@
#define SLAVE_QDSS_STM 17
#define SLAVE_TCU 18
+#define MASTER_QUP_CORE_0 0
+#define MASTER_QUP_CORE_1 1
+#define MASTER_QUP_CORE_2 2
+#define SLAVE_QUP_CORE_0 3
+#define SLAVE_QUP_CORE_1 4
+#define SLAVE_QUP_CORE_2 5
+
#endif
diff --git a/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
new file mode 100644
index 000000000000..bd415cb7b669
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/amlogic,meson-g12a-gpio-intc.h
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc. All rights reserved.
+ * Author: Huqiang Qin <huqiang.qin@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+#define _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H
+
+/* IRQID[11:0] - GPIOAO[11:0] */
+#define IRQID_GPIOAO_0 0
+#define IRQID_GPIOAO_1 1
+#define IRQID_GPIOAO_2 2
+#define IRQID_GPIOAO_3 3
+#define IRQID_GPIOAO_4 4
+#define IRQID_GPIOAO_5 5
+#define IRQID_GPIOAO_6 6
+#define IRQID_GPIOAO_7 7
+#define IRQID_GPIOAO_8 8
+#define IRQID_GPIOAO_9 9
+#define IRQID_GPIOAO_10 10
+#define IRQID_GPIOAO_11 11
+
+/* IRQID[27:12] - GPIOZ[15:0] */
+#define IRQID_GPIOZ_0 12
+#define IRQID_GPIOZ_1 13
+#define IRQID_GPIOZ_2 14
+#define IRQID_GPIOZ_3 15
+#define IRQID_GPIOZ_4 16
+#define IRQID_GPIOZ_5 17
+#define IRQID_GPIOZ_6 18
+#define IRQID_GPIOZ_7 19
+#define IRQID_GPIOZ_8 20
+#define IRQID_GPIOZ_9 21
+#define IRQID_GPIOZ_10 22
+#define IRQID_GPIOZ_11 23
+#define IRQID_GPIOZ_12 24
+#define IRQID_GPIOZ_13 25
+#define IRQID_GPIOZ_14 26
+#define IRQID_GPIOZ_15 27
+
+/* IRQID[36:28] - GPIOH[8:0] */
+#define IRQID_GPIOH_0 28
+#define IRQID_GPIOH_1 29
+#define IRQID_GPIOH_2 30
+#define IRQID_GPIOH_3 31
+#define IRQID_GPIOH_4 32
+#define IRQID_GPIOH_5 33
+#define IRQID_GPIOH_6 34
+#define IRQID_GPIOH_7 35
+#define IRQID_GPIOH_8 36
+
+/* IRQID[52:37] - BOOT[15:0] */
+#define IRQID_BOOT_0 37
+#define IRQID_BOOT_1 38
+#define IRQID_BOOT_2 39
+#define IRQID_BOOT_3 40
+#define IRQID_BOOT_4 41
+#define IRQID_BOOT_5 42
+#define IRQID_BOOT_6 43
+#define IRQID_BOOT_7 44
+#define IRQID_BOOT_8 45
+#define IRQID_BOOT_9 46
+#define IRQID_BOOT_10 47
+#define IRQID_BOOT_11 48
+#define IRQID_BOOT_12 49
+#define IRQID_BOOT_13 50
+#define IRQID_BOOT_14 51
+#define IRQID_BOOT_15 52
+
+/* IRQID[60:53] - GPIOC[7:0] */
+#define IRQID_GPIOC_0 53
+#define IRQID_GPIOC_1 54
+#define IRQID_GPIOC_2 55
+#define IRQID_GPIOC_3 56
+#define IRQID_GPIOC_4 57
+#define IRQID_GPIOC_5 58
+#define IRQID_GPIOC_6 59
+#define IRQID_GPIOC_7 60
+
+/* IRQID[76:61] - GPIOA[15:0] */
+#define IRQID_GPIOA_0 61
+#define IRQID_GPIOA_1 62
+#define IRQID_GPIOA_2 63
+#define IRQID_GPIOA_3 64
+#define IRQID_GPIOA_4 65
+#define IRQID_GPIOA_5 66
+#define IRQID_GPIOA_6 67
+#define IRQID_GPIOA_7 68
+#define IRQID_GPIOA_8 69
+#define IRQID_GPIOA_9 70
+#define IRQID_GPIOA_10 71
+#define IRQID_GPIOA_11 72
+#define IRQID_GPIOA_12 73
+#define IRQID_GPIOA_13 74
+#define IRQID_GPIOA_14 75
+#define IRQID_GPIOA_15 76
+
+/* IRQID[96:77] - GPIOX[19:0] */
+#define IRQID_GPIOX_0 77
+#define IRQID_GPIOX_1 78
+#define IRQID_GPIOX_2 79
+#define IRQID_GPIOX_3 80
+#define IRQID_GPIOX_4 81
+#define IRQID_GPIOX_5 82
+#define IRQID_GPIOX_6 83
+#define IRQID_GPIOX_7 84
+#define IRQID_GPIOX_8 85
+#define IRQID_GPIOX_9 86
+#define IRQID_GPIOX_10 87
+#define IRQID_GPIOX_11 88
+#define IRQID_GPIOX_12 89
+#define IRQID_GPIOX_13 90
+#define IRQID_GPIOX_14 91
+#define IRQID_GPIOX_15 92
+#define IRQID_GPIOX_16 93
+#define IRQID_GPIOX_17 94
+#define IRQID_GPIOX_18 95
+#define IRQID_GPIOX_19 96
+
+/* IRQID[99:97] - GPIOE[2:0] */
+#define IRQID_GPIOE_0 97
+#define IRQID_GPIOE_1 98
+#define IRQID_GPIOE_2 99
+
+#endif /* _DT_BINDINGS_IRQ_MESON_G12A_GPIO_H */
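These IDs fill the first cell of the Amlogic GPIO interrupt controller's
two-cell interrupt specifier: the IRQ id above plus a trigger type from
<dt-bindings/interrupt-controller/irq.h>. A small illustrative consumer; the
&gpio_intc label and the chosen pin are assumptions, not part of this patch:

    button {
            interrupt-parent = <&gpio_intc>;
            interrupts = <IRQID_GPIOX_5 IRQ_TYPE_EDGE_FALLING>;
    };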
diff --git a/include/dt-bindings/memory/mediatek,mt8188-memory-port.h b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
new file mode 100644
index 000000000000..337ab11262af
--- /dev/null
+++ b/include/dt-bindings/memory/mediatek,mt8188-memory-port.h
@@ -0,0 +1,489 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2022 MediaTek Inc.
+ * Author: Chengci Xu <chengci.xu@mediatek.com>
+ */
+#ifndef _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+#define _DT_BINDINGS_MEMORY_MEDIATEK_MT8188_LARB_PORT_H_
+
+#include <dt-bindings/memory/mtk-memory-port.h>
+
+/*
+ * MM IOMMU larbs:
+ * The hardware larb indexes below are not contiguous (larb11, for example,
+ * is split into larb11a/larb11b/larb11c), so the larbs are reindexed here
+ * from a software point of view.
+ */
+#define SMI_L0_ID 0
+#define SMI_L1_ID 1
+#define SMI_L2_ID 2
+#define SMI_L3_ID 3
+#define SMI_L4_ID 4
+#define SMI_L5_ID 5
+#define SMI_L6_ID 6
+#define SMI_L7_ID 7
+#define SMI_L9_ID 8
+#define SMI_L10_ID 9
+#define SMI_L11A_ID 10
+#define SMI_L11B_ID 11
+#define SMI_L11C_ID 12
+#define SMI_L12_ID 13
+#define SMI_L13_ID 14
+#define SMI_L14_ID 15
+#define SMI_L15_ID 16
+#define SMI_L16A_ID 17
+#define SMI_L16B_ID 18
+#define SMI_L17A_ID 19
+#define SMI_L17B_ID 20
+#define SMI_L19_ID 21
+#define SMI_L21_ID 22
+#define SMI_L23_ID 23
+#define SMI_L27_ID 24
+#define SMI_L28_ID 25
+
+/*
+ * The MM IOMMU supports a 16GB DMA address space, split into four ranges:
+ * 0 ~ 4G, 4G ~ 8G, 8G ~ 12G and 12G ~ 16G. The masters may be placed in
+ * any of these regions, BUT:
+ * a) All the ports inside one larb must be in the same range.
+ * b) The iova of any master must NOT cross a 4G/8G/12G boundary.
+ *
+ * This is the suggested mapping for this SoC:
+ *
+ * modules   dma-address-region              larbs-ports
+ * disp      0 ~ 4G                          larb0/1/2/3
+ * vcodec    4G ~ 8G                         larb19(21)[1]/21(22)/23
+ * cam/mdp   8G ~ 12G                        the other larbs.
+ * N/A       12G ~ 16G
+ * CCU0      0x24000_0000 ~ 0x243ff_ffff     larb27(24): port 0/1
+ * CCU1      0x24400_0000 ~ 0x247ff_ffff     larb27(24): port 2/3
+ *
+ * This SoC has two MM IOMMU instances, connected as follows:
+ * iommu-vdo: larb0/2/5/9/10/11A/11C/13/16B/17B/19/21
+ * iommu-vpp: larb1/3/4/6/7/11B/12/14/15/16A/17A/23/27
+ *
+ * [1]: This is larb19, but its index is 21 from the SW view.
+ */
+
+/* MM IOMMU ports */
+/* LARB 0 -- VDO-0 */
+#define M4U_PORT_L0_DISP_RDMA1 MTK_M4U_ID(SMI_L0_ID, 0)
+#define M4U_PORT_L0_DISP_WDMA0 MTK_M4U_ID(SMI_L0_ID, 1)
+#define M4U_PORT_L0_DISP_OVL0_RDMA0 MTK_M4U_ID(SMI_L0_ID, 2)
+#define M4U_PORT_L0_DISP_OVL0_RDMA1 MTK_M4U_ID(SMI_L0_ID, 3)
+#define M4U_PORT_L0_DISP_OVL0_HDR MTK_M4U_ID(SMI_L0_ID, 4)
+#define M4U_PORT_L0_DISP_POSTMASK0 MTK_M4U_ID(SMI_L0_ID, 5)
+#define M4U_PORT_L0_DISP_FAKE_ENG0 MTK_M4U_ID(SMI_L0_ID, 6)
+
+/* LARB 1 -- VD0-0 */
+#define M4U_PORT_L1_DISP_RDMA0 MTK_M4U_ID(SMI_L1_ID, 0)
+#define M4U_PORT_L1_DISP_WDMA1 MTK_M4U_ID(SMI_L1_ID, 1)
+#define M4U_PORT_L1_DISP_OVL1_RDMA0 MTK_M4U_ID(SMI_L1_ID, 2)
+#define M4U_PORT_L1_DISP_OVL1_RDMA1 MTK_M4U_ID(SMI_L1_ID, 3)
+#define M4U_PORT_L1_DISP_OVL1_HDR MTK_M4U_ID(SMI_L1_ID, 4)
+#define M4U_PORT_L1_DISP_WROT0 MTK_M4U_ID(SMI_L1_ID, 5)
+#define M4U_PORT_L1_DISP_FAKE_ENG1 MTK_M4U_ID(SMI_L1_ID, 6)
+
+/* LARB 2 -- VDO-1 */
+#define M4U_PORT_L2_MDP_RDMA0 MTK_M4U_ID(SMI_L2_ID, 0)
+#define M4U_PORT_L2_MDP_RDMA2 MTK_M4U_ID(SMI_L2_ID, 1)
+#define M4U_PORT_L2_MDP_RDMA4 MTK_M4U_ID(SMI_L2_ID, 2)
+#define M4U_PORT_L2_MDP_RDMA6 MTK_M4U_ID(SMI_L2_ID, 3)
+#define M4U_PORT_L2_DISP_FAKE1 MTK_M4U_ID(SMI_L2_ID, 4)
+
+/* LARB 3 -- VDO-1 */
+#define M4U_PORT_L3_MDP_RDMA1 MTK_M4U_ID(SMI_L3_ID, 0)
+#define M4U_PORT_L3_MDP_RDMA3 MTK_M4U_ID(SMI_L3_ID, 1)
+#define M4U_PORT_L3_MDP_RDMA5 MTK_M4U_ID(SMI_L3_ID, 2)
+#define M4U_PORT_L3_MDP_RDMA7 MTK_M4U_ID(SMI_L3_ID, 3)
+#define M4U_PORT_L3_HDR_DS_SMI MTK_M4U_ID(SMI_L3_ID, 4)
+#define M4U_PORT_L3_HDR_ADL_SMI MTK_M4U_ID(SMI_L3_ID, 5)
+#define M4U_PORT_L3_DISP_FAKE1 MTK_M4U_ID(SMI_L3_ID, 6)
+
+/* LARB 4 -- VPP-0 */
+#define M4U_PORT_L4_MDP_RDMA MTK_M4U_ID(SMI_L4_ID, 0)
+#define M4U_PORT_L4_MDP_FG MTK_M4U_ID(SMI_L4_ID, 1)
+#define M4U_PORT_L4_MDP_OVL MTK_M4U_ID(SMI_L4_ID, 2)
+#define M4U_PORT_L4_MDP_WROT MTK_M4U_ID(SMI_L4_ID, 3)
+#define M4U_PORT_L4_FAKE_ENG MTK_M4U_ID(SMI_L4_ID, 4)
+#define M4U_PORT_L4_DISP_RDMA MTK_M4U_ID(SMI_L4_ID, 5)
+#define M4U_PORT_L4_DISP_WDMA MTK_M4U_ID(SMI_L4_ID, 6)
+
+/* LARB 5 -- VPP-1 */
+#define M4U_PORT_L5_SVPP1_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 0)
+#define M4U_PORT_L5_SVPP1_MDP_FG MTK_M4U_ID(SMI_L5_ID, 1)
+#define M4U_PORT_L5_SVPP1_MDP_OVL MTK_M4U_ID(SMI_L5_ID, 2)
+#define M4U_PORT_L5_SVPP1_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 3)
+#define M4U_PORT_L5_SVPP2_MDP_RDMA MTK_M4U_ID(SMI_L5_ID, 4)
+#define M4U_PORT_L5_SVPP2_MDP_FG MTK_M4U_ID(SMI_L5_ID, 5)
+#define M4U_PORT_L5_SVPP2_MDP_WROT MTK_M4U_ID(SMI_L5_ID, 6)
+#define M4U_PORT_L5_LARB5_FAKE_ENG MTK_M4U_ID(SMI_L5_ID, 7)
+
+/* LARB 6 -- VPP-1 */
+#define M4U_PORT_L6_SVPP3_MDP_RDMA MTK_M4U_ID(SMI_L6_ID, 0)
+#define M4U_PORT_L6_SVPP3_MDP_FG MTK_M4U_ID(SMI_L6_ID, 1)
+#define M4U_PORT_L6_SVPP3_MDP_WROT MTK_M4U_ID(SMI_L6_ID, 2)
+#define M4U_PORT_L6_LARB6_FAKE_ENG MTK_M4U_ID(SMI_L6_ID, 3)
+
+/* LARB 7 -- WPE */
+#define M4U_PORT_L7_WPE_RDMA_0 MTK_M4U_ID(SMI_L7_ID, 0)
+#define M4U_PORT_L7_WPE_RDMA_1 MTK_M4U_ID(SMI_L7_ID, 1)
+#define M4U_PORT_L7_WPE_WDMA_0 MTK_M4U_ID(SMI_L7_ID, 2)
+
+/* LARB 9 -- IMG-M */
+#define M4U_PORT_L9_IMGI_T1_A MTK_M4U_ID(SMI_L9_ID, 0)
+#define M4U_PORT_L9_UFDI_T1_A MTK_M4U_ID(SMI_L9_ID, 1)
+#define M4U_PORT_L9_IMGBI_T1_A MTK_M4U_ID(SMI_L9_ID, 2)
+#define M4U_PORT_L9_IMGCI_T1_A MTK_M4U_ID(SMI_L9_ID, 3)
+#define M4U_PORT_L9_SMTI_T1_A MTK_M4U_ID(SMI_L9_ID, 4)
+#define M4U_PORT_L9_SMTI_T4_A MTK_M4U_ID(SMI_L9_ID, 5)
+#define M4U_PORT_L9_TNCSTI_T1_A MTK_M4U_ID(SMI_L9_ID, 6)
+#define M4U_PORT_L9_TNCSTI_T4_A MTK_M4U_ID(SMI_L9_ID, 7)
+#define M4U_PORT_L9_YUVO_T1_A MTK_M4U_ID(SMI_L9_ID, 8)
+#define M4U_PORT_L9_YUVBO_T1_A MTK_M4U_ID(SMI_L9_ID, 9)
+#define M4U_PORT_L9_YUVCO_T1_A MTK_M4U_ID(SMI_L9_ID, 10)
+#define M4U_PORT_L9_TIMGO_T1_A MTK_M4U_ID(SMI_L9_ID, 11)
+#define M4U_PORT_L9_YUVO_T2_A MTK_M4U_ID(SMI_L9_ID, 12)
+#define M4U_PORT_L9_YUVO_T5_A MTK_M4U_ID(SMI_L9_ID, 13)
+#define M4U_PORT_L9_IMGI_T1_B MTK_M4U_ID(SMI_L9_ID, 14)
+#define M4U_PORT_L9_IMGBI_T1_B MTK_M4U_ID(SMI_L9_ID, 15)
+#define M4U_PORT_L9_IMGCI_T1_B MTK_M4U_ID(SMI_L9_ID, 16)
+#define M4U_PORT_L9_SMTI_T4_B MTK_M4U_ID(SMI_L9_ID, 17)
+#define M4U_PORT_L9_TNCSO_T1_A MTK_M4U_ID(SMI_L9_ID, 18)
+#define M4U_PORT_L9_SMTO_T1_A MTK_M4U_ID(SMI_L9_ID, 19)
+#define M4U_PORT_L9_SMTO_T4_A MTK_M4U_ID(SMI_L9_ID, 20)
+#define M4U_PORT_L9_TNCSTO_T1_A MTK_M4U_ID(SMI_L9_ID, 21)
+#define M4U_PORT_L9_YUVO_T2_B MTK_M4U_ID(SMI_L9_ID, 22)
+#define M4U_PORT_L9_YUVO_T5_B MTK_M4U_ID(SMI_L9_ID, 23)
+#define M4U_PORT_L9_SMTO_T4_B MTK_M4U_ID(SMI_L9_ID, 24)
+
+/* LARB 10 -- IMG-D */
+#define M4U_PORT_L10_IMGI_D1 MTK_M4U_ID(SMI_L10_ID, 0)
+#define M4U_PORT_L10_IMGBI_D1 MTK_M4U_ID(SMI_L10_ID, 1)
+#define M4U_PORT_L10_IMGCI_D1 MTK_M4U_ID(SMI_L10_ID, 2)
+#define M4U_PORT_L10_IMGDI_D1 MTK_M4U_ID(SMI_L10_ID, 3)
+#define M4U_PORT_L10_DEPI_D1 MTK_M4U_ID(SMI_L10_ID, 4)
+#define M4U_PORT_L10_DMGI_D1 MTK_M4U_ID(SMI_L10_ID, 5)
+#define M4U_PORT_L10_SMTI_D1 MTK_M4U_ID(SMI_L10_ID, 6)
+#define M4U_PORT_L10_RECI_D1 MTK_M4U_ID(SMI_L10_ID, 7)
+#define M4U_PORT_L10_RECI_D1_N MTK_M4U_ID(SMI_L10_ID, 8)
+#define M4U_PORT_L10_TNRWI_D1 MTK_M4U_ID(SMI_L10_ID, 9)
+#define M4U_PORT_L10_TNRCI_D1 MTK_M4U_ID(SMI_L10_ID, 10)
+#define M4U_PORT_L10_TNRCI_D1_N MTK_M4U_ID(SMI_L10_ID, 11)
+#define M4U_PORT_L10_IMG4O_D1 MTK_M4U_ID(SMI_L10_ID, 12)
+#define M4U_PORT_L10_IMG4BO_D1 MTK_M4U_ID(SMI_L10_ID, 13)
+#define M4U_PORT_L10_SMTI_D8 MTK_M4U_ID(SMI_L10_ID, 14)
+#define M4U_PORT_L10_SMTO_D1 MTK_M4U_ID(SMI_L10_ID, 15)
+#define M4U_PORT_L10_TNRMO_D1 MTK_M4U_ID(SMI_L10_ID, 16)
+#define M4U_PORT_L10_TNRMO_D1_N MTK_M4U_ID(SMI_L10_ID, 17)
+#define M4U_PORT_L10_SMTO_D8 MTK_M4U_ID(SMI_L10_ID, 18)
+#define M4U_PORT_L10_DBGO_D1 MTK_M4U_ID(SMI_L10_ID, 19)
+
+/* LARB 11A -- IMG-D */
+#define M4U_PORT_L11A_WPE_RDMA_0 MTK_M4U_ID(SMI_L11A_ID, 0)
+#define M4U_PORT_L11A_WPE_RDMA_1 MTK_M4U_ID(SMI_L11A_ID, 1)
+#define M4U_PORT_L11A_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 2)
+#define M4U_PORT_L11A_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11A_ID, 3)
+#define M4U_PORT_L11A_WPE_CQ0 MTK_M4U_ID(SMI_L11A_ID, 4)
+#define M4U_PORT_L11A_WPE_CQ1 MTK_M4U_ID(SMI_L11A_ID, 5)
+#define M4U_PORT_L11A_PIMGI_P1 MTK_M4U_ID(SMI_L11A_ID, 6)
+#define M4U_PORT_L11A_PIMGBI_P1 MTK_M4U_ID(SMI_L11A_ID, 7)
+#define M4U_PORT_L11A_PIMGCI_P1 MTK_M4U_ID(SMI_L11A_ID, 8)
+#define M4U_PORT_L11A_IMGI_T1_C MTK_M4U_ID(SMI_L11A_ID, 9)
+#define M4U_PORT_L11A_IMGBI_T1_C MTK_M4U_ID(SMI_L11A_ID, 10)
+#define M4U_PORT_L11A_IMGCI_T1_C MTK_M4U_ID(SMI_L11A_ID, 11)
+#define M4U_PORT_L11A_SMTI_T1_C MTK_M4U_ID(SMI_L11A_ID, 12)
+#define M4U_PORT_L11A_SMTI_T4_C MTK_M4U_ID(SMI_L11A_ID, 13)
+#define M4U_PORT_L11A_SMTI_T6_C MTK_M4U_ID(SMI_L11A_ID, 14)
+#define M4U_PORT_L11A_YUVO_T1_C MTK_M4U_ID(SMI_L11A_ID, 15)
+#define M4U_PORT_L11A_YUVBO_T1_C MTK_M4U_ID(SMI_L11A_ID, 16)
+#define M4U_PORT_L11A_YUVCO_T1_C MTK_M4U_ID(SMI_L11A_ID, 17)
+#define M4U_PORT_L11A_WPE_WDMA_0 MTK_M4U_ID(SMI_L11A_ID, 18)
+#define M4U_PORT_L11A_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11A_ID, 19)
+#define M4U_PORT_L11A_WROT_P1 MTK_M4U_ID(SMI_L11A_ID, 20)
+#define M4U_PORT_L11A_TCCSO_P1 MTK_M4U_ID(SMI_L11A_ID, 21)
+#define M4U_PORT_L11A_TCCSI_P1 MTK_M4U_ID(SMI_L11A_ID, 22)
+#define M4U_PORT_L11A_TIMGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 23)
+#define M4U_PORT_L11A_YUVO_T2_C MTK_M4U_ID(SMI_L11A_ID, 24)
+#define M4U_PORT_L11A_YUVO_T5_C MTK_M4U_ID(SMI_L11A_ID, 25)
+#define M4U_PORT_L11A_SMTO_T1_C MTK_M4U_ID(SMI_L11A_ID, 26)
+#define M4U_PORT_L11A_SMTO_T4_C MTK_M4U_ID(SMI_L11A_ID, 27)
+#define M4U_PORT_L11A_SMTO_T6_C MTK_M4U_ID(SMI_L11A_ID, 28)
+#define M4U_PORT_L11A_DBGO_T1_C MTK_M4U_ID(SMI_L11A_ID, 29)
+
+/* LARB 11B -- IMG-D */
+#define M4U_PORT_L11B_WPE_RDMA_0 MTK_M4U_ID(SMI_L11B_ID, 0)
+#define M4U_PORT_L11B_WPE_RDMA_1 MTK_M4U_ID(SMI_L11B_ID, 1)
+#define M4U_PORT_L11B_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 2)
+#define M4U_PORT_L11B_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11B_ID, 3)
+#define M4U_PORT_L11B_WPE_CQ0 MTK_M4U_ID(SMI_L11B_ID, 4)
+#define M4U_PORT_L11B_WPE_CQ1 MTK_M4U_ID(SMI_L11B_ID, 5)
+#define M4U_PORT_L11B_PIMGI_P1 MTK_M4U_ID(SMI_L11B_ID, 6)
+#define M4U_PORT_L11B_PIMGBI_P1 MTK_M4U_ID(SMI_L11B_ID, 7)
+#define M4U_PORT_L11B_PIMGCI_P1 MTK_M4U_ID(SMI_L11B_ID, 8)
+#define M4U_PORT_L11B_IMGI_T1_C MTK_M4U_ID(SMI_L11B_ID, 9)
+#define M4U_PORT_L11B_IMGBI_T1_C MTK_M4U_ID(SMI_L11B_ID, 10)
+#define M4U_PORT_L11B_IMGCI_T1_C MTK_M4U_ID(SMI_L11B_ID, 11)
+#define M4U_PORT_L11B_SMTI_T1_C MTK_M4U_ID(SMI_L11B_ID, 12)
+#define M4U_PORT_L11B_SMTI_T4_C MTK_M4U_ID(SMI_L11B_ID, 13)
+#define M4U_PORT_L11B_SMTI_T6_C MTK_M4U_ID(SMI_L11B_ID, 14)
+#define M4U_PORT_L11B_YUVO_T1_C MTK_M4U_ID(SMI_L11B_ID, 15)
+#define M4U_PORT_L11B_YUVBO_T1_C MTK_M4U_ID(SMI_L11B_ID, 16)
+#define M4U_PORT_L11B_YUVCO_T1_C MTK_M4U_ID(SMI_L11B_ID, 17)
+#define M4U_PORT_L11B_WPE_WDMA_0 MTK_M4U_ID(SMI_L11B_ID, 18)
+#define M4U_PORT_L11B_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11B_ID, 19)
+#define M4U_PORT_L11B_WROT_P1 MTK_M4U_ID(SMI_L11B_ID, 20)
+#define M4U_PORT_L11B_TCCSO_P1 MTK_M4U_ID(SMI_L11B_ID, 21)
+#define M4U_PORT_L11B_TCCSI_P1 MTK_M4U_ID(SMI_L11B_ID, 22)
+#define M4U_PORT_L11B_TIMGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 23)
+#define M4U_PORT_L11B_YUVO_T2_C MTK_M4U_ID(SMI_L11B_ID, 24)
+#define M4U_PORT_L11B_YUVO_T5_C MTK_M4U_ID(SMI_L11B_ID, 25)
+#define M4U_PORT_L11B_SMTO_T1_C MTK_M4U_ID(SMI_L11B_ID, 26)
+#define M4U_PORT_L11B_SMTO_T4_C MTK_M4U_ID(SMI_L11B_ID, 27)
+#define M4U_PORT_L11B_SMTO_T6_C MTK_M4U_ID(SMI_L11B_ID, 28)
+#define M4U_PORT_L11B_DBGO_T1_C MTK_M4U_ID(SMI_L11B_ID, 29)
+
+/* LARB 11C -- IMG-D */
+#define M4U_PORT_L11C_WPE_RDMA_0 MTK_M4U_ID(SMI_L11C_ID, 0)
+#define M4U_PORT_L11C_WPE_RDMA_1 MTK_M4U_ID(SMI_L11C_ID, 1)
+#define M4U_PORT_L11C_WPE_RDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 2)
+#define M4U_PORT_L11C_WPE_RDMA_4P_1 MTK_M4U_ID(SMI_L11C_ID, 3)
+#define M4U_PORT_L11C_WPE_CQ0 MTK_M4U_ID(SMI_L11C_ID, 4)
+#define M4U_PORT_L11C_WPE_CQ1 MTK_M4U_ID(SMI_L11C_ID, 5)
+#define M4U_PORT_L11C_PIMGI_P1 MTK_M4U_ID(SMI_L11C_ID, 6)
+#define M4U_PORT_L11C_PIMGBI_P1 MTK_M4U_ID(SMI_L11C_ID, 7)
+#define M4U_PORT_L11C_PIMGCI_P1 MTK_M4U_ID(SMI_L11C_ID, 8)
+#define M4U_PORT_L11C_IMGI_T1_C MTK_M4U_ID(SMI_L11C_ID, 9)
+#define M4U_PORT_L11C_IMGBI_T1_C MTK_M4U_ID(SMI_L11C_ID, 10)
+#define M4U_PORT_L11C_IMGCI_T1_C MTK_M4U_ID(SMI_L11C_ID, 11)
+#define M4U_PORT_L11C_SMTI_T1_C MTK_M4U_ID(SMI_L11C_ID, 12)
+#define M4U_PORT_L11C_SMTI_T4_C MTK_M4U_ID(SMI_L11C_ID, 13)
+#define M4U_PORT_L11C_SMTI_T6_C MTK_M4U_ID(SMI_L11C_ID, 14)
+#define M4U_PORT_L11C_YUVO_T1_C MTK_M4U_ID(SMI_L11C_ID, 15)
+#define M4U_PORT_L11C_YUVBO_T1_C MTK_M4U_ID(SMI_L11C_ID, 16)
+#define M4U_PORT_L11C_YUVCO_T1_C MTK_M4U_ID(SMI_L11C_ID, 17)
+#define M4U_PORT_L11C_WPE_WDMA_0 MTK_M4U_ID(SMI_L11C_ID, 18)
+#define M4U_PORT_L11C_WPE_WDMA_4P_0 MTK_M4U_ID(SMI_L11C_ID, 19)
+#define M4U_PORT_L11C_WROT_P1 MTK_M4U_ID(SMI_L11C_ID, 20)
+#define M4U_PORT_L11C_TCCSO_P1 MTK_M4U_ID(SMI_L11C_ID, 21)
+#define M4U_PORT_L11C_TCCSI_P1 MTK_M4U_ID(SMI_L11C_ID, 22)
+#define M4U_PORT_L11C_TIMGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 23)
+#define M4U_PORT_L11C_YUVO_T2_C MTK_M4U_ID(SMI_L11C_ID, 24)
+#define M4U_PORT_L11C_YUVO_T5_C MTK_M4U_ID(SMI_L11C_ID, 25)
+#define M4U_PORT_L11C_SMTO_T1_C MTK_M4U_ID(SMI_L11C_ID, 26)
+#define M4U_PORT_L11C_SMTO_T4_C MTK_M4U_ID(SMI_L11C_ID, 27)
+#define M4U_PORT_L11C_SMTO_T6_C MTK_M4U_ID(SMI_L11C_ID, 28)
+#define M4U_PORT_L11C_DBGO_T1_C MTK_M4U_ID(SMI_L11C_ID, 29)
+
+/* LARB 12 -- IPE */
+#define M4U_PORT_L12_FDVT_RDA_0 MTK_M4U_ID(SMI_L12_ID, 0)
+#define M4U_PORT_L12_FDVT_RDB_0 MTK_M4U_ID(SMI_L12_ID, 1)
+#define M4U_PORT_L12_FDVT_WRA_0 MTK_M4U_ID(SMI_L12_ID, 2)
+#define M4U_PORT_L12_FDVT_WRB_0 MTK_M4U_ID(SMI_L12_ID, 3)
+#define M4U_PORT_L12_ME_RDMA MTK_M4U_ID(SMI_L12_ID, 4)
+#define M4U_PORT_L12_ME_WDMA MTK_M4U_ID(SMI_L12_ID, 5)
+#define M4U_PORT_L12_DVS_RDMA MTK_M4U_ID(SMI_L12_ID, 6)
+#define M4U_PORT_L12_DVS_WDMA MTK_M4U_ID(SMI_L12_ID, 7)
+#define M4U_PORT_L12_DVP_RDMA MTK_M4U_ID(SMI_L12_ID, 8)
+#define M4U_PORT_L12_DVP_WDMA MTK_M4U_ID(SMI_L12_ID, 9)
+#define M4U_PORT_L12_FDVT_2ND_RDA_0 MTK_M4U_ID(SMI_L12_ID, 10)
+#define M4U_PORT_L12_FDVT_2ND_RDB_0 MTK_M4U_ID(SMI_L12_ID, 11)
+#define M4U_PORT_L12_FDVT_2ND_WRA_0 MTK_M4U_ID(SMI_L12_ID, 12)
+#define M4U_PORT_L12_FDVT_2ND_WRB_0 MTK_M4U_ID(SMI_L12_ID, 13)
+#define M4U_PORT_L12_DHZEI_E1 MTK_M4U_ID(SMI_L12_ID, 14)
+#define M4U_PORT_L12_DHZEO_E1 MTK_M4U_ID(SMI_L12_ID, 15)
+
+/* LARB 13 -- CAM-1 */
+#define M4U_PORT_L13_CAMSV_CQI_E1 MTK_M4U_ID(SMI_L13_ID, 0)
+#define M4U_PORT_L13_CAMSV_CQI_E2 MTK_M4U_ID(SMI_L13_ID, 1)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 2)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 3)
+#define M4U_PORT_L13_GCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 4)
+#define M4U_PORT_L13_GCAMSV_C_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 5)
+#define M4U_PORT_L13_PDAI_A_0 MTK_M4U_ID(SMI_L13_ID, 6)
+#define M4U_PORT_L13_PDAI_A_1 MTK_M4U_ID(SMI_L13_ID, 7)
+#define M4U_PORT_L13_CAMSV_CQI_B_E1 MTK_M4U_ID(SMI_L13_ID, 8)
+#define M4U_PORT_L13_CAMSV_CQI_B_E2 MTK_M4U_ID(SMI_L13_ID, 9)
+#define M4U_PORT_L13_CAMSV_CQI_C_E1 MTK_M4U_ID(SMI_L13_ID, 10)
+#define M4U_PORT_L13_CAMSV_CQI_C_E2 MTK_M4U_ID(SMI_L13_ID, 11)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 12)
+#define M4U_PORT_L13_GCAMSV_E_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 13)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 14)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 15)
+#define M4U_PORT_L13_GCAMSV_A_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 16)
+#define M4U_PORT_L13_GCAMSV_C_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 17)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_1 MTK_M4U_ID(SMI_L13_ID, 18)
+#define M4U_PORT_L13_GCAMSV_E_UFEO_2 MTK_M4U_ID(SMI_L13_ID, 19)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_1 MTK_M4U_ID(SMI_L13_ID, 20)
+#define M4U_PORT_L13_GCAMSV_G_IMGO_2 MTK_M4U_ID(SMI_L13_ID, 21)
+#define M4U_PORT_L13_PDAO_A MTK_M4U_ID(SMI_L13_ID, 22)
+#define M4U_PORT_L13_PDAO_C MTK_M4U_ID(SMI_L13_ID, 23)
+
+/* LARB 14 -- CAM-1 */
+#define M4U_PORT_L14_GCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 0)
+#define M4U_PORT_L14_GCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 1)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 2)
+#define M4U_PORT_L14_SCAMSV_A_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 3)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 4)
+#define M4U_PORT_L14_SCAMSV_B_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 5)
+#define M4U_PORT_L14_PDAI_B_0 MTK_M4U_ID(SMI_L14_ID, 6)
+#define M4U_PORT_L14_PDAI_B_1 MTK_M4U_ID(SMI_L14_ID, 7)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 8)
+#define M4U_PORT_L14_GCAMSV_D_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 9)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 10)
+#define M4U_PORT_L14_GCAMSV_F_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 11)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_1 MTK_M4U_ID(SMI_L14_ID, 12)
+#define M4U_PORT_L14_GCAMSV_H_IMGO_2 MTK_M4U_ID(SMI_L14_ID, 13)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 14)
+#define M4U_PORT_L14_GCAMSV_B_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 15)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_1 MTK_M4U_ID(SMI_L14_ID, 16)
+#define M4U_PORT_L14_GCAMSV_D_UFEO_2 MTK_M4U_ID(SMI_L14_ID, 17)
+#define M4U_PORT_L14_PDAO_B MTK_M4U_ID(SMI_L14_ID, 18)
+#define M4U_PORT_L14_IPUI MTK_M4U_ID(SMI_L14_ID, 19)
+#define M4U_PORT_L14_IPUO MTK_M4U_ID(SMI_L14_ID, 20)
+#define M4U_PORT_L14_IPU3O MTK_M4U_ID(SMI_L14_ID, 21)
+#define M4U_PORT_L14_FAKE MTK_M4U_ID(SMI_L14_ID, 22)
+
+/* LARB 15 -- IMG-D */
+#define M4U_PORT_L15_VIPI_D1 MTK_M4U_ID(SMI_L15_ID, 0)
+#define M4U_PORT_L15_VIPBI_D1 MTK_M4U_ID(SMI_L15_ID, 1)
+#define M4U_PORT_L15_SMTI_D6 MTK_M4U_ID(SMI_L15_ID, 2)
+#define M4U_PORT_L15_TNCSTI_D1 MTK_M4U_ID(SMI_L15_ID, 3)
+#define M4U_PORT_L15_TNCSTI_D4 MTK_M4U_ID(SMI_L15_ID, 4)
+#define M4U_PORT_L15_SMTI_D4 MTK_M4U_ID(SMI_L15_ID, 5)
+#define M4U_PORT_L15_IMG3O_D1 MTK_M4U_ID(SMI_L15_ID, 6)
+#define M4U_PORT_L15_IMG3BO_D1 MTK_M4U_ID(SMI_L15_ID, 7)
+#define M4U_PORT_L15_IMG3CO_D1 MTK_M4U_ID(SMI_L15_ID, 8)
+#define M4U_PORT_L15_IMG2O_D1 MTK_M4U_ID(SMI_L15_ID, 9)
+#define M4U_PORT_L15_SMTI_D9 MTK_M4U_ID(SMI_L15_ID, 10)
+#define M4U_PORT_L15_SMTO_D4 MTK_M4U_ID(SMI_L15_ID, 11)
+#define M4U_PORT_L15_FEO_D1 MTK_M4U_ID(SMI_L15_ID, 12)
+#define M4U_PORT_L15_TNCSO_D1 MTK_M4U_ID(SMI_L15_ID, 13)
+#define M4U_PORT_L15_TNCSTO_D1 MTK_M4U_ID(SMI_L15_ID, 14)
+#define M4U_PORT_L15_SMTO_D6 MTK_M4U_ID(SMI_L15_ID, 15)
+#define M4U_PORT_L15_SMTO_D9 MTK_M4U_ID(SMI_L15_ID, 16)
+#define M4U_PORT_L15_TNCO_D1 MTK_M4U_ID(SMI_L15_ID, 17)
+#define M4U_PORT_L15_TNCO_D1_N MTK_M4U_ID(SMI_L15_ID, 18)
+
+/* LARB 16A -- CAM */
+#define M4U_PORT_L16A_IMGO_R1 MTK_M4U_ID(SMI_L16A_ID, 0)
+#define M4U_PORT_L16A_CQI_R1 MTK_M4U_ID(SMI_L16A_ID, 1)
+#define M4U_PORT_L16A_CQI_R2 MTK_M4U_ID(SMI_L16A_ID, 2)
+#define M4U_PORT_L16A_BPCI_R1 MTK_M4U_ID(SMI_L16A_ID, 3)
+#define M4U_PORT_L16A_LSCI_R1 MTK_M4U_ID(SMI_L16A_ID, 4)
+#define M4U_PORT_L16A_RAWI_R2 MTK_M4U_ID(SMI_L16A_ID, 5)
+#define M4U_PORT_L16A_RAWI_R3 MTK_M4U_ID(SMI_L16A_ID, 6)
+#define M4U_PORT_L16A_UFDI_R2 MTK_M4U_ID(SMI_L16A_ID, 7)
+#define M4U_PORT_L16A_UFDI_R3 MTK_M4U_ID(SMI_L16A_ID, 8)
+#define M4U_PORT_L16A_RAWI_R4 MTK_M4U_ID(SMI_L16A_ID, 9)
+#define M4U_PORT_L16A_RAWI_R5 MTK_M4U_ID(SMI_L16A_ID, 10)
+#define M4U_PORT_L16A_AAI_R1 MTK_M4U_ID(SMI_L16A_ID, 11)
+#define M4U_PORT_L16A_UFDI_R5 MTK_M4U_ID(SMI_L16A_ID, 12)
+#define M4U_PORT_L16A_FHO_R1 MTK_M4U_ID(SMI_L16A_ID, 13)
+#define M4U_PORT_L16A_AAO_R1 MTK_M4U_ID(SMI_L16A_ID, 14)
+#define M4U_PORT_L16A_TSFSO_R1 MTK_M4U_ID(SMI_L16A_ID, 15)
+#define M4U_PORT_L16A_FLKO_R1 MTK_M4U_ID(SMI_L16A_ID, 16)
+
+/* LARB 16B -- CAM */
+#define M4U_PORT_L16B_IMGO_R1 MTK_M4U_ID(SMI_L16B_ID, 0)
+#define M4U_PORT_L16B_CQI_R1 MTK_M4U_ID(SMI_L16B_ID, 1)
+#define M4U_PORT_L16B_CQI_R2 MTK_M4U_ID(SMI_L16B_ID, 2)
+#define M4U_PORT_L16B_BPCI_R1 MTK_M4U_ID(SMI_L16B_ID, 3)
+#define M4U_PORT_L16B_LSCI_R1 MTK_M4U_ID(SMI_L16B_ID, 4)
+#define M4U_PORT_L16B_RAWI_R2 MTK_M4U_ID(SMI_L16B_ID, 5)
+#define M4U_PORT_L16B_RAWI_R3 MTK_M4U_ID(SMI_L16B_ID, 6)
+#define M4U_PORT_L16B_UFDI_R2 MTK_M4U_ID(SMI_L16B_ID, 7)
+#define M4U_PORT_L16B_UFDI_R3 MTK_M4U_ID(SMI_L16B_ID, 8)
+#define M4U_PORT_L16B_RAWI_R4 MTK_M4U_ID(SMI_L16B_ID, 9)
+#define M4U_PORT_L16B_RAWI_R5 MTK_M4U_ID(SMI_L16B_ID, 10)
+#define M4U_PORT_L16B_AAI_R1 MTK_M4U_ID(SMI_L16B_ID, 11)
+#define M4U_PORT_L16B_UFDI_R5 MTK_M4U_ID(SMI_L16B_ID, 12)
+#define M4U_PORT_L16B_FHO_R1 MTK_M4U_ID(SMI_L16B_ID, 13)
+#define M4U_PORT_L16B_AAO_R1 MTK_M4U_ID(SMI_L16B_ID, 14)
+#define M4U_PORT_L16B_TSFSO_R1 MTK_M4U_ID(SMI_L16B_ID, 15)
+#define M4U_PORT_L16B_FLKO_R1 MTK_M4U_ID(SMI_L16B_ID, 16)
+
+/* LARB 17A -- CAM */
+#define M4U_PORT_L17A_YUVO_R1 MTK_M4U_ID(SMI_L17A_ID, 0)
+#define M4U_PORT_L17A_YUVO_R3 MTK_M4U_ID(SMI_L17A_ID, 1)
+#define M4U_PORT_L17A_YUVCO_R1 MTK_M4U_ID(SMI_L17A_ID, 2)
+#define M4U_PORT_L17A_YUVO_R2 MTK_M4U_ID(SMI_L17A_ID, 3)
+#define M4U_PORT_L17A_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17A_ID, 4)
+#define M4U_PORT_L17A_DRZS4NO_R1 MTK_M4U_ID(SMI_L17A_ID, 5)
+#define M4U_PORT_L17A_TNCSO_R1 MTK_M4U_ID(SMI_L17A_ID, 6)
+
+/* LARB 17B -- CAM */
+#define M4U_PORT_L17B_YUVO_R1 MTK_M4U_ID(SMI_L17B_ID, 0)
+#define M4U_PORT_L17B_YUVO_R3 MTK_M4U_ID(SMI_L17B_ID, 1)
+#define M4U_PORT_L17B_YUVCO_R1 MTK_M4U_ID(SMI_L17B_ID, 2)
+#define M4U_PORT_L17B_YUVO_R2 MTK_M4U_ID(SMI_L17B_ID, 3)
+#define M4U_PORT_L17B_RZH1N2TO_R1 MTK_M4U_ID(SMI_L17B_ID, 4)
+#define M4U_PORT_L17B_DRZS4NO_R1 MTK_M4U_ID(SMI_L17B_ID, 5)
+#define M4U_PORT_L17B_TNCSO_R1 MTK_M4U_ID(SMI_L17B_ID, 6)
+
+/* LARB 19 -- VENC */
+#define M4U_PORT_L19_VENC_RCPU MTK_M4U_ID(SMI_L19_ID, 0)
+#define M4U_PORT_L19_VENC_REC MTK_M4U_ID(SMI_L19_ID, 1)
+#define M4U_PORT_L19_VENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 2)
+#define M4U_PORT_L19_VENC_SV_COMV MTK_M4U_ID(SMI_L19_ID, 3)
+#define M4U_PORT_L19_VENC_RD_COMV MTK_M4U_ID(SMI_L19_ID, 4)
+#define M4U_PORT_L19_VENC_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 5)
+#define M4U_PORT_L19_VENC_NBM_RDMA_LITE MTK_M4U_ID(SMI_L19_ID, 6)
+#define M4U_PORT_L19_JPGENC_Y_RDMA MTK_M4U_ID(SMI_L19_ID, 7)
+#define M4U_PORT_L19_JPGENC_C_RDMA MTK_M4U_ID(SMI_L19_ID, 8)
+#define M4U_PORT_L19_JPGENC_Q_TABLE MTK_M4U_ID(SMI_L19_ID, 9)
+#define M4U_PORT_L19_VENC_SUB_W_LUMA MTK_M4U_ID(SMI_L19_ID, 10)
+#define M4U_PORT_L19_VENC_FCS_NBM_RDMA MTK_M4U_ID(SMI_L19_ID, 11)
+#define M4U_PORT_L19_JPGENC_BSDMA MTK_M4U_ID(SMI_L19_ID, 12)
+#define M4U_PORT_L19_JPGDEC_WDMA_0 MTK_M4U_ID(SMI_L19_ID, 13)
+#define M4U_PORT_L19_JPGDEC_BSDMA_0 MTK_M4U_ID(SMI_L19_ID, 14)
+#define M4U_PORT_L19_VENC_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 15)
+#define M4U_PORT_L19_VENC_NBM_WDMA_LITE MTK_M4U_ID(SMI_L19_ID, 16)
+#define M4U_PORT_L19_VENC_FCS_NBM_WDMA MTK_M4U_ID(SMI_L19_ID, 17)
+#define M4U_PORT_L19_JPGDEC_WDMA_1 MTK_M4U_ID(SMI_L19_ID, 18)
+#define M4U_PORT_L19_JPGDEC_BSDMA_1 MTK_M4U_ID(SMI_L19_ID, 19)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_1 MTK_M4U_ID(SMI_L19_ID, 20)
+#define M4U_PORT_L19_JPGDEC_HUFF_OFFSET_0 MTK_M4U_ID(SMI_L19_ID, 21)
+#define M4U_PORT_L19_VENC_CUR_LUMA MTK_M4U_ID(SMI_L19_ID, 22)
+#define M4U_PORT_L19_VENC_CUR_CHROMA MTK_M4U_ID(SMI_L19_ID, 23)
+#define M4U_PORT_L19_VENC_REF_LUMA MTK_M4U_ID(SMI_L19_ID, 24)
+#define M4U_PORT_L19_VENC_REF_CHROMA MTK_M4U_ID(SMI_L19_ID, 25)
+#define M4U_PORT_L19_VENC_SUB_R_LUMA MTK_M4U_ID(SMI_L19_ID, 26)
+
+/* LARB 21 -- VDEC-CORE0 */
+#define M4U_PORT_L21_HW_VDEC_MC_EXT MTK_M4U_ID(SMI_L21_ID, 0)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT MTK_M4U_ID(SMI_L21_ID, 1)
+#define M4U_PORT_L21_HW_VDEC_PP_EXT MTK_M4U_ID(SMI_L21_ID, 2)
+#define M4U_PORT_L21_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(SMI_L21_ID, 3)
+#define M4U_PORT_L21_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(SMI_L21_ID, 4)
+#define M4U_PORT_L21_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(SMI_L21_ID, 5)
+#define M4U_PORT_L21_HW_VDEC_TILE_EXT MTK_M4U_ID(SMI_L21_ID, 6)
+#define M4U_PORT_L21_HW_VDEC_VLD_EXT MTK_M4U_ID(SMI_L21_ID, 7)
+#define M4U_PORT_L21_HW_VDEC_VLD2_EXT MTK_M4U_ID(SMI_L21_ID, 8)
+#define M4U_PORT_L21_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(SMI_L21_ID, 9)
+#define M4U_PORT_L21_HW_VDEC_UFO_EXT_C MTK_M4U_ID(SMI_L21_ID, 10)
+
+/* LARB 23 -- VDEC-SOC */
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD_EXT MTK_M4U_ID(SMI_L23_ID, 0)
+#define M4U_PORT_L23_HW_VDEC_LAT0_VLD2_EXT MTK_M4U_ID(SMI_L23_ID, 1)
+#define M4U_PORT_L23_HW_VDEC_LAT0_AVC_MV_EXT MTK_M4U_ID(SMI_L23_ID, 2)
+#define M4U_PORT_L23_HW_VDEC_LAT0_PRED_RD_EXT MTK_M4U_ID(SMI_L23_ID, 3)
+#define M4U_PORT_L23_HW_VDEC_LAT0_TILE_EXT MTK_M4U_ID(SMI_L23_ID, 4)
+#define M4U_PORT_L23_HW_VDEC_LAT0_WDMA_EXT MTK_M4U_ID(SMI_L23_ID, 5)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT MTK_M4U_ID(SMI_L23_ID, 6)
+#define M4U_PORT_L23_HW_VDEC_UFO_ENC_EXT_C MTK_M4U_ID(SMI_L23_ID, 7)
+#define M4U_PORT_L23_HW_VDEC_MC_EXT_C MTK_M4U_ID(SMI_L23_ID, 8)
+
+/* LARB 27 -- CCU */
+#define M4U_PORT_L27_CCUI MTK_M4U_ID(SMI_L27_ID, 0)
+#define M4U_PORT_L27_CCUO MTK_M4U_ID(SMI_L27_ID, 1)
+#define M4U_PORT_L27_CCUI2 MTK_M4U_ID(SMI_L27_ID, 2)
+#define M4U_PORT_L27_CCUO2 MTK_M4U_ID(SMI_L27_ID, 3)
+
+/* LARB 28 -- AXI-CCU */
+#define M4U_PORT_L28_CCU_AXI_0 MTK_M4U_ID(SMI_L28_ID, 0)
+
+/* infra/peri */
+#define IFR_IOMMU_PORT_PCIE_0 MTK_IFAIOMMU_PERI_ID(0)
+
+#endif
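Consumers reference these port IDs through the single iommu cell of the
MediaTek IOMMU binding, and each port must point at the IOMMU instance (vdo or
vpp) that its larb is wired to, per the mapping comment above. A minimal,
illustrative sketch; the &iommu_vdo and &ovl0 labels are assumptions, not part
of this patch:

    &ovl0 {
            iommus = <&iommu_vdo M4U_PORT_L0_DISP_OVL0_RDMA0>;
    };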
diff --git a/include/dt-bindings/mux/ti-serdes.h b/include/dt-bindings/mux/ti-serdes.h
index 669ca2d6abce..b0b1091aad6d 100644
--- a/include/dt-bindings/mux/ti-serdes.h
+++ b/include/dt-bindings/mux/ti-serdes.h
@@ -6,6 +6,14 @@
#ifndef _DT_BINDINGS_MUX_TI_SERDES
#define _DT_BINDINGS_MUX_TI_SERDES
+/*
+ * These bindings are deprecated, because they do not match the actual
+ * concept of bindings but rather contain pure constant values used only
+ * in DTS board files.
+ * Instead, include the header in the DTS source directory.
+ */
+#warning "These bindings are deprecated. Instead, use the header in the DTS source directory."
+
/* J721E */
#define J721E_SERDES0_LANE0_QSGMII_LANE1 0x0
diff --git a/include/dt-bindings/power/amlogic,c3-pwrc.h b/include/dt-bindings/power/amlogic,c3-pwrc.h
new file mode 100644
index 000000000000..1d98a25b08a4
--- /dev/null
+++ b/include/dt-bindings/power/amlogic,c3-pwrc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2023 Amlogic, Inc.
+ * Author: hongyu chen1 <hongyu.chen1@amlogic.com>
+ */
+#ifndef _DT_BINDINGS_AMLOGIC_C3_POWER_H
+#define _DT_BINDINGS_AMLOGIC_C3_POWER_H
+
+#define PWRC_C3_NNA_ID 0
+#define PWRC_C3_AUDIO_ID 1
+#define PWRC_C3_RESV_SEC_ID 2
+#define PWRC_C3_SDIOA_ID 3
+#define PWRC_C3_EMMC_ID 4
+#define PWRC_C3_USB_COMB_ID 5
+#define PWRC_C3_SDCARD_ID 6
+#define PWRC_C3_ETH_ID 7
+#define PWRC_C3_RESV0_ID 8
+#define PWRC_C3_GE2D_ID 9
+#define PWRC_C3_CVE_ID 10
+#define PWRC_C3_GDC_WRAP_ID 11
+#define PWRC_C3_ISP_TOP_ID 12
+#define PWRC_C3_MIPI_ISP_WRAP_ID 13
+#define PWRC_C3_VCODEC_ID 14
+
+#endif
diff --git a/include/dt-bindings/power/meson-a1-power.h b/include/dt-bindings/power/meson-a1-power.h
index 6cf50bfb8ccf..724c370d6853 100644
--- a/include/dt-bindings/power/meson-a1-power.h
+++ b/include/dt-bindings/power/meson-a1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Amlogic, Inc.
* Author: Jianxin Pan <jianxin.pan@amlogic.com>
diff --git a/include/dt-bindings/power/meson-axg-power.h b/include/dt-bindings/power/meson-axg-power.h
index e5243884b249..ace0e468ce21 100644
--- a/include/dt-bindings/power/meson-axg-power.h
+++ b/include/dt-bindings/power/meson-axg-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-g12a-power.h b/include/dt-bindings/power/meson-g12a-power.h
index 93b03bdd60b7..44ec0c50e340 100644
--- a/include/dt-bindings/power/meson-g12a-power.h
+++ b/include/dt-bindings/power/meson-g12a-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-gxbb-power.h b/include/dt-bindings/power/meson-gxbb-power.h
index 1262dac696c0..8d0b32b6c02c 100644
--- a/include/dt-bindings/power/meson-gxbb-power.h
+++ b/include/dt-bindings/power/meson-gxbb-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson-s4-power.h b/include/dt-bindings/power/meson-s4-power.h
index 462dd2cb938b..f210a524a592 100644
--- a/include/dt-bindings/power/meson-s4-power.h
+++ b/include/dt-bindings/power/meson-s4-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2021 Amlogic, Inc.
* Author: Shunzhou Jiang <shunzhou.jiang@amlogic.com>
diff --git a/include/dt-bindings/power/meson-sm1-power.h b/include/dt-bindings/power/meson-sm1-power.h
index a020ab00c134..d78e710dbfff 100644
--- a/include/dt-bindings/power/meson-sm1-power.h
+++ b/include/dt-bindings/power/meson-sm1-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 BayLibre, SAS
* Author: Neil Armstrong <narmstrong@baylibre.com>
diff --git a/include/dt-bindings/power/meson8-power.h b/include/dt-bindings/power/meson8-power.h
index dd8b2ddb82a7..7a55ba2cd22e 100644
--- a/include/dt-bindings/power/meson8-power.h
+++ b/include/dt-bindings/power/meson8-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2019 Martin Blumenstingl <martin.blumenstingl@googlemail.com>
*/
diff --git a/include/dt-bindings/power/qcom,rpmhpd.h b/include/dt-bindings/power/qcom,rpmhpd.h
new file mode 100644
index 000000000000..7c201a66bc69
--- /dev/null
+++ b/include/dt-bindings/power/qcom,rpmhpd.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+#define _DT_BINDINGS_POWER_QCOM_RPMHPD_H
+
+/* Generic RPMH Power Domain Indexes */
+#define RPMHPD_CX 0
+#define RPMHPD_CX_AO 1
+#define RPMHPD_EBI 2
+#define RPMHPD_GFX 3
+#define RPMHPD_LCX 4
+#define RPMHPD_LMX 5
+#define RPMHPD_MMCX 6
+#define RPMHPD_MMCX_AO 7
+#define RPMHPD_MX 8
+#define RPMHPD_MX_AO 9
+#define RPMHPD_MXC 10
+#define RPMHPD_MXC_AO 11
+#define RPMHPD_MSS 12
+#define RPMHPD_NSP 13
+#define RPMHPD_NSP0 14
+#define RPMHPD_NSP1 15
+#define RPMHPD_QPHY 16
+#define RPMHPD_DDR 17
+#define RPMHPD_XO 18
+
+#endif
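These generic indexes are consumed through the RPMh power-domain provider's
single power-domain cell, typically paired with a required-opps entry for the
performance level. A short sketch; the &rpmhpd, &rpmhpd_opp_nom and &mdss
labels follow common Qualcomm DTS naming but are assumptions here, not part of
this patch:

    &mdss {
            power-domains = <&rpmhpd RPMHPD_MMCX>;
            required-opps = <&rpmhpd_opp_nom>;
    };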
diff --git a/include/dt-bindings/power/r8a779f0-sysc.h b/include/dt-bindings/power/r8a779f0-sysc.h
index 0ec8ad727ed9..cde1536e9ed0 100644
--- a/include/dt-bindings/power/r8a779f0-sysc.h
+++ b/include/dt-bindings/power/r8a779f0-sysc.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (C) 2021 Renesas Electronics Corp.
*/
diff --git a/include/dt-bindings/power/rk3588-power.h b/include/dt-bindings/power/rk3588-power.h
index 1b92fec013cb..6b91a50cc6d6 100644
--- a/include/dt-bindings/power/rk3588-power.h
+++ b/include/dt-bindings/power/rk3588-power.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
#ifndef __DT_BINDINGS_POWER_RK3588_POWER_H__
#define __DT_BINDINGS_POWER_RK3588_POWER_H__
diff --git a/include/dt-bindings/power/summit,smb347-charger.h b/include/dt-bindings/power/summit,smb347-charger.h
index 3205699b5e41..14f2f9cf2020 100644
--- a/include/dt-bindings/power/summit,smb347-charger.h
+++ b/include/dt-bindings/power/summit,smb347-charger.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0-or-later or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) */
/*
* Author: David Heidelberg <david@ixit.cz>
*/
diff --git a/include/dt-bindings/regulator/st,stm32mp13-regulator.h b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
new file mode 100644
index 000000000000..b3a974dfc585
--- /dev/null
+++ b/include/dt-bindings/regulator/st,stm32mp13-regulator.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Copyright (C) 2022, STMicroelectronics - All Rights Reserved
+ */
+
+#ifndef __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+#define __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H
+
+/* SCMI voltage domains identifiers */
+
+/* SOC Internal regulators */
+#define VOLTD_SCMI_REG11 0
+#define VOLTD_SCMI_REG18 1
+#define VOLTD_SCMI_USB33 2
+#define VOLTD_SCMI_SDMMC1_IO 3
+#define VOLTD_SCMI_SDMMC2_IO 4
+#define VOLTD_SCMI_VREFBUF 5
+
+/* STPMIC1 regulators */
+#define VOLTD_SCMI_STPMIC1_BUCK1 6
+#define VOLTD_SCMI_STPMIC1_BUCK2 7
+#define VOLTD_SCMI_STPMIC1_BUCK3 8
+#define VOLTD_SCMI_STPMIC1_BUCK4 9
+#define VOLTD_SCMI_STPMIC1_LDO1 10
+#define VOLTD_SCMI_STPMIC1_LDO2 11
+#define VOLTD_SCMI_STPMIC1_LDO3 12
+#define VOLTD_SCMI_STPMIC1_LDO4 13
+#define VOLTD_SCMI_STPMIC1_LDO5 14
+#define VOLTD_SCMI_STPMIC1_LDO6 15
+#define VOLTD_SCMI_STPMIC1_VREFDDR 16
+#define VOLTD_SCMI_STPMIC1_BOOST 17
+#define VOLTD_SCMI_STPMIC1_PWR_SW1 18
+#define VOLTD_SCMI_STPMIC1_PWR_SW2 19
+
+/* External regulators */
+#define VOLTD_SCMI_REGU0 20
+#define VOLTD_SCMI_REGU1 21
+#define VOLTD_SCMI_REGU2 22
+#define VOLTD_SCMI_REGU3 23
+#define VOLTD_SCMI_REGU4 24
+
+#endif /* __DT_BINDINGS_REGULATOR_ST_STM32MP13_REGULATOR_H */
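These identifiers are the SCMI voltage-domain numbers, used as the reg (and
unit address) of regulator nodes under the SCMI voltage-domain protocol node
(protocol@17) of the arm,scmi binding. A minimal sketch, with illustrative node
and regulator names that are not part of this patch:

    scmi {
            protocol@17 {
                    regulators {
                            #address-cells = <1>;
                            #size-cells = <0>;

                            scmi_reg11: regulator@0 {
                                    reg = <VOLTD_SCMI_REG11>;
                                    regulator-name = "reg11";
                            };
                    };
            };
    };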
diff --git a/include/dt-bindings/reset/altr,rst-mgr-s10.h b/include/dt-bindings/reset/altr,rst-mgr-s10.h
index 70ea3a09dbe1..04c4d0c6fd34 100644
--- a/include/dt-bindings/reset/altr,rst-mgr-s10.h
+++ b/include/dt-bindings/reset/altr,rst-mgr-s10.h
@@ -63,12 +63,15 @@
#define I2C2_RESET 74
#define I2C3_RESET 75
#define I2C4_RESET 76
-/* 77-79 is empty */
+#define I3C0_RESET 77
+#define I3C1_RESET 78
+/* 79 is empty */
#define UART0_RESET 80
#define UART1_RESET 81
/* 82-87 is empty */
#define GPIO0_RESET 88
#define GPIO1_RESET 89
+#define WATCHDOG4_RESET 90
/* BRGMODRST */
#define SOC2FPGA_RESET 96
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq5018.h b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
new file mode 100644
index 000000000000..8f03c92fc23b
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq5018.h
@@ -0,0 +1,122 @@
+/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
+/*
+ * Copyright (c) 2023, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_5018_H
+
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 0
+#define GCC_BLSP1_BCR 1
+#define GCC_BLSP1_QUP1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_QUP3_BCR 4
+#define GCC_BLSP1_UART1_BCR 5
+#define GCC_BLSP1_UART2_BCR 6
+#define GCC_BOOT_ROM_BCR 7
+#define GCC_BTSS_BCR 8
+#define GCC_CMN_BLK_BCR 9
+#define GCC_CMN_LDO_BCR 10
+#define GCC_CE_BCR 11
+#define GCC_CRYPTO_BCR 12
+#define GCC_DCC_BCR 13
+#define GCC_DCD_BCR 14
+#define GCC_DDRSS_BCR 15
+#define GCC_EDPD_BCR 16
+#define GCC_GEPHY_BCR 17
+#define GCC_GEPHY_MDC_SW_ARES 18
+#define GCC_GEPHY_DSP_HW_ARES 19
+#define GCC_GEPHY_RX_ARES 20
+#define GCC_GEPHY_TX_ARES 21
+#define GCC_GMAC0_BCR 22
+#define GCC_GMAC0_CFG_ARES 23
+#define GCC_GMAC0_SYS_ARES 24
+#define GCC_GMAC1_BCR 25
+#define GCC_GMAC1_CFG_ARES 26
+#define GCC_GMAC1_SYS_ARES 27
+#define GCC_IMEM_BCR 28
+#define GCC_LPASS_BCR 29
+#define GCC_MDIO0_BCR 30
+#define GCC_MDIO1_BCR 31
+#define GCC_MPM_BCR 32
+#define GCC_PCIE0_BCR 33
+#define GCC_PCIE0_LINK_DOWN_BCR 34
+#define GCC_PCIE0_PHY_BCR 35
+#define GCC_PCIE0PHY_PHY_BCR 36
+#define GCC_PCIE0_PIPE_ARES 37
+#define GCC_PCIE0_SLEEP_ARES 38
+#define GCC_PCIE0_CORE_STICKY_ARES 39
+#define GCC_PCIE0_AXI_MASTER_ARES 40
+#define GCC_PCIE0_AXI_SLAVE_ARES 41
+#define GCC_PCIE0_AHB_ARES 42
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 43
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 44
+#define GCC_PCIE1_BCR 45
+#define GCC_PCIE1_LINK_DOWN_BCR 46
+#define GCC_PCIE1_PHY_BCR 47
+#define GCC_PCIE1PHY_PHY_BCR 48
+#define GCC_PCIE1_PIPE_ARES 49
+#define GCC_PCIE1_SLEEP_ARES 50
+#define GCC_PCIE1_CORE_STICKY_ARES 51
+#define GCC_PCIE1_AXI_MASTER_ARES 52
+#define GCC_PCIE1_AXI_SLAVE_ARES 53
+#define GCC_PCIE1_AHB_ARES 54
+#define GCC_PCIE1_AXI_MASTER_STICKY_ARES 55
+#define GCC_PCIE1_AXI_SLAVE_STICKY_ARES 56
+#define GCC_PCNOC_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 58
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 59
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 60
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 61
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 62
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 63
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 64
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 65
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 66
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 67
+#define GCC_PCNOC_BUS_TIMEOUT10_BCR 68
+#define GCC_PCNOC_BUS_TIMEOUT11_BCR 69
+#define GCC_PRNG_BCR 70
+#define GCC_Q6SS_DBG_ARES 71
+#define GCC_Q6_AHB_S_ARES 72
+#define GCC_Q6_AHB_ARES 73
+#define GCC_Q6_AXIM2_ARES 74
+#define GCC_Q6_AXIM_ARES 75
+#define GCC_Q6_AXIS_ARES 76
+#define GCC_QDSS_BCR 77
+#define GCC_QPIC_BCR 78
+#define GCC_QUSB2_0_PHY_BCR 79
+#define GCC_SDCC1_BCR 80
+#define GCC_SEC_CTRL_BCR 81
+#define GCC_SPDM_BCR 82
+#define GCC_SYSTEM_NOC_BCR 83
+#define GCC_TCSR_BCR 84
+#define GCC_TLMM_BCR 85
+#define GCC_UBI0_AXI_ARES 86
+#define GCC_UBI0_AHB_ARES 87
+#define GCC_UBI0_NC_AXI_ARES 88
+#define GCC_UBI0_DBG_ARES 89
+#define GCC_UBI0_UTCM_ARES 90
+#define GCC_UBI0_CORE_ARES 91
+#define GCC_UBI32_BCR 92
+#define GCC_UNIPHY_BCR 93
+#define GCC_UNIPHY_AHB_ARES 94
+#define GCC_UNIPHY_SYS_ARES 95
+#define GCC_UNIPHY_RX_ARES 96
+#define GCC_UNIPHY_TX_ARES 97
+#define GCC_USB0_BCR 98
+#define GCC_USB0_PHY_BCR 99
+#define GCC_WCSS_BCR 100
+#define GCC_WCSS_DBG_ARES 101
+#define GCC_WCSS_ECAHB_ARES 102
+#define GCC_WCSS_ACMT_ARES 103
+#define GCC_WCSS_DBG_BDG_ARES 104
+#define GCC_WCSS_AHB_S_ARES 105
+#define GCC_WCSS_AXI_M_ARES 106
+#define GCC_WCSS_AXI_S_ARES 107
+#define GCC_WCSS_Q6_BCR 108
+#define GCC_WCSSAON_RESET 109
+#define GCC_UNIPHY_SOFT_RESET 110
+#define GCC_GEPHY_MISC_ARES 111
+
+#endif
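Peripheral nodes consume these indexes through the common reset binding, with
the GCC node acting as the reset provider. An illustrative sketch; the &gcc and
&usb labels are assumptions, not part of this patch:

    &usb {
            resets = <&gcc GCC_USB0_BCR>;
            reset-names = "usb";
    };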
diff --git a/include/dt-bindings/reset/rockchip,rk3588-cru.h b/include/dt-bindings/reset/rockchip,rk3588-cru.h
index 738e56aead93..d4264db2a07f 100644
--- a/include/dt-bindings/reset/rockchip,rk3588-cru.h
+++ b/include/dt-bindings/reset/rockchip,rk3588-cru.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0 or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
* Copyright (c) 2021 Rockchip Electronics Co. Ltd.
* Copyright (c) 2022 Collabora Ltd.
diff --git a/include/dt-bindings/reset/starfive,jh7110-crg.h b/include/dt-bindings/reset/starfive,jh7110-crg.h
index d78e38690ceb..eaf4a0d84f6a 100644
--- a/include/dt-bindings/reset/starfive,jh7110-crg.h
+++ b/include/dt-bindings/reset/starfive,jh7110-crg.h
@@ -1,6 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/*
* Copyright (C) 2022 Emil Renner Berthing <kernel@esmil.dk>
+ * Copyright (C) 2022 StarFive Technology Co., Ltd.
*/
#ifndef __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__
@@ -151,4 +152,63 @@
#define JH7110_AONRST_END 8
+/* STGCRG resets */
+#define JH7110_STGRST_SYSCON 0
+#define JH7110_STGRST_HIFI4_CORE 1
+#define JH7110_STGRST_HIFI4_AXI 2
+#define JH7110_STGRST_SEC_AHB 3
+#define JH7110_STGRST_E24_CORE 4
+#define JH7110_STGRST_DMA1P_AXI 5
+#define JH7110_STGRST_DMA1P_AHB 6
+#define JH7110_STGRST_USB0_AXI 7
+#define JH7110_STGRST_USB0_APB 8
+#define JH7110_STGRST_USB0_UTMI_APB 9
+#define JH7110_STGRST_USB0_PWRUP 10
+#define JH7110_STGRST_PCIE0_AXI_MST0 11
+#define JH7110_STGRST_PCIE0_AXI_SLV0 12
+#define JH7110_STGRST_PCIE0_AXI_SLV 13
+#define JH7110_STGRST_PCIE0_BRG 14
+#define JH7110_STGRST_PCIE0_CORE 15
+#define JH7110_STGRST_PCIE0_APB 16
+#define JH7110_STGRST_PCIE1_AXI_MST0 17
+#define JH7110_STGRST_PCIE1_AXI_SLV0 18
+#define JH7110_STGRST_PCIE1_AXI_SLV 19
+#define JH7110_STGRST_PCIE1_BRG 20
+#define JH7110_STGRST_PCIE1_CORE 21
+#define JH7110_STGRST_PCIE1_APB 22
+
+#define JH7110_STGRST_END 23
+
+/* ISPCRG resets */
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_P 0
+#define JH7110_ISPRST_ISPV2_TOP_WRAPPER_C 1
+#define JH7110_ISPRST_M31DPHY_HW 2
+#define JH7110_ISPRST_M31DPHY_B09_AON 3
+#define JH7110_ISPRST_VIN_APB 4
+#define JH7110_ISPRST_VIN_PIXEL_IF0 5
+#define JH7110_ISPRST_VIN_PIXEL_IF1 6
+#define JH7110_ISPRST_VIN_PIXEL_IF2 7
+#define JH7110_ISPRST_VIN_PIXEL_IF3 8
+#define JH7110_ISPRST_VIN_SYS 9
+#define JH7110_ISPRST_VIN_P_AXI_RD 10
+#define JH7110_ISPRST_VIN_P_AXI_WR 11
+
+#define JH7110_ISPRST_END 12
+
+/* VOUTCRG resets */
+#define JH7110_VOUTRST_DC8200_AXI 0
+#define JH7110_VOUTRST_DC8200_AHB 1
+#define JH7110_VOUTRST_DC8200_CORE 2
+#define JH7110_VOUTRST_DSITX_DPI 3
+#define JH7110_VOUTRST_DSITX_APB 4
+#define JH7110_VOUTRST_DSITX_RXESC 5
+#define JH7110_VOUTRST_DSITX_SYS 6
+#define JH7110_VOUTRST_DSITX_TXBYTEHS 7
+#define JH7110_VOUTRST_DSITX_TXESC 8
+#define JH7110_VOUTRST_HDMI_TX_HDMI 9
+#define JH7110_VOUTRST_MIPITX_DPHY_SYS 10
+#define JH7110_VOUTRST_MIPITX_DPHY_TXBYTEHS 11
+
+#define JH7110_VOUTRST_END 12
+
#endif /* __DT_BINDINGS_RESET_STARFIVE_JH7110_CRG_H__ */
diff --git a/include/dt-bindings/reset/stm32mp1-resets.h b/include/dt-bindings/reset/stm32mp1-resets.h
index 4ffa7c3612e6..9071f139649f 100644
--- a/include/dt-bindings/reset/stm32mp1-resets.h
+++ b/include/dt-bindings/reset/stm32mp1-resets.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause */
+/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
* Copyright (C) STMicroelectronics 2018 - All Rights Reserved
* Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
diff --git a/include/dt-bindings/reset/sun20i-d1-ccu.h b/include/dt-bindings/reset/sun20i-d1-ccu.h
index f8001cf50bf1..79e52aca5912 100644
--- a/include/dt-bindings/reset/sun20i-d1-ccu.h
+++ b/include/dt-bindings/reset/sun20i-d1-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 huangzhenwei@allwinnertech.com
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
diff --git a/include/dt-bindings/reset/sun20i-d1-r-ccu.h b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
index d93d6423d283..e20babc990af 100644
--- a/include/dt-bindings/reset/sun20i-d1-r-ccu.h
+++ b/include/dt-bindings/reset/sun20i-d1-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2021 Samuel Holland <samuel@sholland.org>
*/
diff --git a/include/dt-bindings/reset/sun50i-a100-ccu.h b/include/dt-bindings/reset/sun50i-a100-ccu.h
index 55c0ada99885..d13764bc1860 100644
--- a/include/dt-bindings/reset/sun50i-a100-ccu.h
+++ b/include/dt-bindings/reset/sun50i-a100-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/reset/sun50i-a100-r-ccu.h b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
index 737bf6f66626..1e7c4431f03c 100644
--- a/include/dt-bindings/reset/sun50i-a100-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-a100-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (c) 2020 Yangtao Li <frank@allwinnertech.com>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-ccu.h b/include/dt-bindings/reset/sun50i-h6-ccu.h
index 81106f455097..d038ddfa4818 100644
--- a/include/dt-bindings/reset/sun50i-h6-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-ccu.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: (GPL-2.0+ or MIT)
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2017 Icenowy Zheng <icenowy@aosc.io>
*/
diff --git a/include/dt-bindings/reset/sun50i-h6-r-ccu.h b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
index 7950e799c76d..d541ade884fc 100644
--- a/include/dt-bindings/reset/sun50i-h6-r-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h6-r-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2016 Icenowy Zheng <icenowy@aosc.xyz>
*/
diff --git a/include/dt-bindings/reset/sun50i-h616-ccu.h b/include/dt-bindings/reset/sun50i-h616-ccu.h
index cb6285a8d128..1bd8bb0a11be 100644
--- a/include/dt-bindings/reset/sun50i-h616-ccu.h
+++ b/include/dt-bindings/reset/sun50i-h616-ccu.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: (GPL-2.0+ or MIT) */
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
/*
* Copyright (C) 2020 Arm Ltd.
*/
diff --git a/include/kunit/test.h b/include/kunit/test.h
index d33114097d0d..68ff01aee244 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -12,6 +12,7 @@
#include <kunit/assert.h>
#include <kunit/try-catch.h>
+#include <linux/args.h>
#include <linux/compiler.h>
#include <linux/container_of.h>
#include <linux/err.h>
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 847da6fc2713..31029f4f7be8 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -12,7 +12,7 @@
#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
-#ifdef CONFIG_HW_PERF_EVENTS
+#if IS_ENABLED(CONFIG_HW_PERF_EVENTS) && IS_ENABLED(CONFIG_KVM)
struct kvm_pmc {
u8 idx; /* index into the pmu->pmc array */
@@ -74,6 +74,7 @@ int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
+void kvm_vcpu_pmu_resync_el0(void);
#define kvm_vcpu_has_pmu(vcpu) \
(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))
@@ -171,6 +172,7 @@ static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
return 0;
}
+static inline void kvm_vcpu_pmu_resync_el0(void) {}
#endif
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 3a3ab05e13fd..2dd175f5debd 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -41,19 +41,8 @@ struct aer_capability_regs {
};
#if defined(CONFIG_PCIEAER)
-/* PCIe port driver needs this function to enable AER */
-int pci_enable_pcie_error_reporting(struct pci_dev *dev);
-int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
#else
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
- return -EINVAL;
-}
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
diff --git a/include/linux/amd-iommu.h b/include/linux/amd-iommu.h
index 953e6f12fa1c..99a5201d9e62 100644
--- a/include/linux/amd-iommu.h
+++ b/include/linux/amd-iommu.h
@@ -32,7 +32,6 @@ struct task_struct;
struct pci_dev;
extern int amd_iommu_detect(void);
-extern int amd_iommu_init_hardware(void);
/**
* amd_iommu_init_device() - Init device for use with IOMMUv2 driver
diff --git a/include/linux/args.h b/include/linux/args.h
new file mode 100644
index 000000000000..8ff60a54eb7d
--- /dev/null
+++ b/include/linux/args.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_ARGS_H
+#define _LINUX_ARGS_H
+
+/*
+ * How do these macros work?
+ *
+ * In __COUNT_ARGS() _0 to _12 are just placeholders from the start
+ * in order to make sure _n is positioned over the correct number
+ * from 12 to 0 (depending on X, which is a variadic argument list).
+ * They serve no purpose other than occupying a position. Since each
+ * macro parameter must have a distinct identifier, those identifiers
+ * are as good as any.
+ *
+ * In COUNT_ARGS() we use actual integers, so __COUNT_ARGS() returns
+ * that as _n.
+ */
+
+/* This counts to 12. Any more and it will return the 13th argument. */
+#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
+#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
+
+/* Concatenate two parameters, but allow them to be expanded beforehand. */
+#define __CONCAT(a, b) a ## b
+#define CONCATENATE(a, b) __CONCAT(a, b)
+
+#endif /* _LINUX_ARGS_H */
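
A minimal sketch of how the new helpers compose, assuming only the definitions above; the PICK() macros below are illustrative and not part of this patch:

#include <linux/args.h>

/* Dispatch to a helper based on how many arguments were passed. */
#define PICK(...)	CONCATENATE(pick_, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
#define pick_1(a)	(a)
#define pick_2(a, b)	((a) + (b))
#define pick_3(a, b, c)	((a) + (b) + (c))

/*
 * PICK(1)       -> pick_1(1)       == 1
 * PICK(1, 2)    -> pick_2(1, 2)    == 3
 * PICK(1, 2, 3) -> pick_3(1, 2, 3) == 6
 * COUNT_ARGS()  -> 0 (the leading empty argument and ##__VA_ARGS__ handle it)
 */
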
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index f196c19f8e55..7c67c17321d4 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -5,6 +5,7 @@
#ifndef __LINUX_ARM_SMCCC_H
#define __LINUX_ARM_SMCCC_H
+#include <linux/args.h>
#include <linux/init.h>
#include <uapi/linux/const.h>
@@ -413,31 +414,26 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#endif
-#define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x
+#define __constraint_read_2 "r" (arg0)
+#define __constraint_read_3 __constraint_read_2, "r" (arg1)
+#define __constraint_read_4 __constraint_read_3, "r" (arg2)
+#define __constraint_read_5 __constraint_read_4, "r" (arg3)
+#define __constraint_read_6 __constraint_read_5, "r" (arg4)
+#define __constraint_read_7 __constraint_read_6, "r" (arg5)
+#define __constraint_read_8 __constraint_read_7, "r" (arg6)
+#define __constraint_read_9 __constraint_read_8, "r" (arg7)
-#define __count_args(...) \
- ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __constraint_read_0 "r" (arg0)
-#define __constraint_read_1 __constraint_read_0, "r" (arg1)
-#define __constraint_read_2 __constraint_read_1, "r" (arg2)
-#define __constraint_read_3 __constraint_read_2, "r" (arg3)
-#define __constraint_read_4 __constraint_read_3, "r" (arg4)
-#define __constraint_read_5 __constraint_read_4, "r" (arg5)
-#define __constraint_read_6 __constraint_read_5, "r" (arg6)
-#define __constraint_read_7 __constraint_read_6, "r" (arg7)
-
-#define __declare_arg_0(a0, res) \
+#define __declare_arg_2(a0, res) \
struct arm_smccc_res *___res = res; \
register unsigned long arg0 asm("r0") = (u32)a0
-#define __declare_arg_1(a0, a1, res) \
+#define __declare_arg_3(a0, a1, res) \
typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
register unsigned long arg0 asm("r0") = (u32)a0; \
register typeof(a1) arg1 asm("r1") = __a1
-#define __declare_arg_2(a0, a1, a2, res) \
+#define __declare_arg_4(a0, a1, a2, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
@@ -445,7 +441,7 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register typeof(a1) arg1 asm("r1") = __a1; \
register typeof(a2) arg2 asm("r2") = __a2
-#define __declare_arg_3(a0, a1, a2, a3, res) \
+#define __declare_arg_5(a0, a1, a2, a3, res) \
typeof(a1) __a1 = a1; \
typeof(a2) __a2 = a2; \
typeof(a3) __a3 = a3; \
@@ -455,34 +451,26 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register typeof(a2) arg2 asm("r2") = __a2; \
register typeof(a3) arg3 asm("r3") = __a3
-#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+#define __declare_arg_6(a0, a1, a2, a3, a4, res) \
typeof(a4) __a4 = a4; \
- __declare_arg_3(a0, a1, a2, a3, res); \
+ __declare_arg_5(a0, a1, a2, a3, res); \
register typeof(a4) arg4 asm("r4") = __a4
-#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+#define __declare_arg_7(a0, a1, a2, a3, a4, a5, res) \
typeof(a5) __a5 = a5; \
- __declare_arg_4(a0, a1, a2, a3, a4, res); \
+ __declare_arg_6(a0, a1, a2, a3, a4, res); \
register typeof(a5) arg5 asm("r5") = __a5
-#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+#define __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res) \
typeof(a6) __a6 = a6; \
- __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
+ __declare_arg_7(a0, a1, a2, a3, a4, a5, res); \
register typeof(a6) arg6 asm("r6") = __a6
-#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+#define __declare_arg_9(a0, a1, a2, a3, a4, a5, a6, a7, res) \
typeof(a7) __a7 = a7; \
- __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
+ __declare_arg_8(a0, a1, a2, a3, a4, a5, a6, res); \
register typeof(a7) arg7 asm("r7") = __a7
-#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
-#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
-
-#define ___constraints(count) \
- : __constraint_read_ ## count \
- : smccc_sve_clobbers "memory"
-#define __constraints(count) ___constraints(count)
-
/*
* We have an output list that is not necessarily used, and GCC feels
* entitled to optimise the whole sequence away. "volatile" is what
@@ -494,11 +482,14 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
register unsigned long r1 asm("r1"); \
register unsigned long r2 asm("r2"); \
register unsigned long r3 asm("r3"); \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
asm volatile(SMCCC_SVE_CHECK \
inst "\n" : \
"=r" (r0), "=r" (r1), "=r" (r2), "=r" (r3) \
- __constraints(__count_args(__VA_ARGS__))); \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : smccc_sve_clobbers "memory"); \
if (___res) \
*___res = (typeof(*___res)){r0, r1, r2, r3}; \
} while (0)
@@ -542,8 +533,12 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
*/
#define __fail_smccc_1_1(...) \
do { \
- __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
- asm ("" : __constraints(__count_args(__VA_ARGS__))); \
+ CONCATENATE(__declare_arg_, \
+ COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__); \
+ asm ("" : \
+ : CONCATENATE(__constraint_read_, \
+ COUNT_ARGS(__VA_ARGS__)) \
+ : smccc_sve_clobbers "memory"); \
if (___res) \
___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
} while (0)
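
A short expansion trace of the reworked macros, as a sketch; arm_smccc_1_1_smc() is the existing invocation macro elsewhere in this header. The __declare_arg_/__constraint_read_ suffixes move up by two because COUNT_ARGS() counts every argument of the invocation, including the function ID and the result pointer, which the removed __count_args() padded out:

/*
 * arm_smccc_1_1_smc(ARM_SMCCC_VERSION_FUNC_ID, &res);
 *
 *   COUNT_ARGS(ARM_SMCCC_VERSION_FUNC_ID, &res)   == 2
 *   CONCATENATE(__declare_arg_, 2)                -> __declare_arg_2
 *   CONCATENATE(__constraint_read_, 2)            -> __constraint_read_2
 *
 * That is the same pair the old __count_args() selected as
 * __declare_arg_0/__constraint_read_0, now without the private counters.
 */
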
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 6a3a9e122bb5..51b1b7054a23 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,8 @@ enum audit_nfcfgop {
AUDIT_NFT_OP_OBJ_RESET,
AUDIT_NFT_OP_FLOWTABLE_REGISTER,
AUDIT_NFT_OP_FLOWTABLE_UNREGISTER,
+ AUDIT_NFT_OP_SETELEM_RESET,
+ AUDIT_NFT_OP_RULE_RESET,
AUDIT_NFT_OP_INVALID,
};
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index fbad4fcd408e..1a97277f99b1 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -46,7 +46,6 @@ extern spinlock_t bdi_lock;
extern struct list_head bdi_list;
extern struct workqueue_struct *bdi_wq;
-extern struct workqueue_struct *bdi_async_bio_wq;
static inline bool wb_has_dirty_io(struct bdi_writeback *wb)
{
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 11984ed29cb8..41d417ee1349 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -253,6 +253,11 @@ static inline struct page *bio_first_page_all(struct bio *bio)
return bio_first_bvec_all(bio)->bv_page;
}
+static inline struct folio *bio_first_folio_all(struct bio *bio)
+{
+ return page_folio(bio_first_page_all(bio));
+}
+
static inline struct bio_vec *bio_last_bvec_all(struct bio *bio)
{
WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED));
@@ -488,7 +493,12 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
void guard_bio_eod(struct bio *bio);
-void zero_fill_bio(struct bio *bio);
+void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+
+static inline void zero_fill_bio(struct bio *bio)
+{
+ zero_fill_bio_iter(bio, bio->bi_iter);
+}
static inline void bio_release_pages(struct bio *bio, bool mark_dirty)
{
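
With zero_fill_bio() reduced to a wrapper, callers that only want to zero part of a bio can pass their own iterator to zero_fill_bio_iter(). A hedged sketch; the wrapper function is hypothetical, bio_advance_iter() is the existing iterator helper:

static void example_zero_uncompleted(struct bio *bio, unsigned int done_bytes)
{
	struct bvec_iter iter = bio->bi_iter;

	/* Skip the part that already completed ... */
	bio_advance_iter(bio, &iter, done_bytes);
	/* ... and zero everything that remains. */
	zero_fill_bio_iter(bio, iter);
}
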
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 495ca198775f..958ed7e89b30 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -178,14 +178,10 @@ struct request {
struct {
unsigned int seq;
- struct list_head list;
rq_end_io_fn *saved_end_io;
} flush;
- union {
- struct __call_single_data csd;
- u64 fifo_time;
- };
+ u64 fifo_time;
/*
* completion callback.
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 83ce87354e9a..eef450f25982 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -538,6 +538,7 @@ struct request_queue {
#define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */
#define QUEUE_FLAG_SYNCHRONOUS 11 /* always completes in submit context */
#define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */
+#define QUEUE_FLAG_HW_WC 18 /* Write back caching supported */
#define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */
#define QUEUE_FLAG_STABLE_WRITES 15 /* don't modify blks until WB is done */
#define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */
@@ -846,6 +847,7 @@ extern const char *blk_op_str(enum req_op op);
int blk_status_to_errno(blk_status_t status);
blk_status_t errno_to_blk_status(int errno);
+const char *blk_status_to_str(blk_status_t status);
/* only poll the hardware once, don't continue until a completion was found */
#define BLK_POLL_ONESHOT (1 << 0)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 12596af59c00..49f8b691496c 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -438,7 +438,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
size /= sizeof(long);
while (size--)
- *ldst++ = *lsrc++;
+ data_race(*ldst++ = *lsrc++);
}
/* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */
@@ -1307,7 +1307,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
diff --git a/include/linux/btf.h b/include/linux/btf.h
index df64cc642074..928113a80a95 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -209,6 +209,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
int btf_check_and_fixup_fields(const struct btf *btf, struct btf_record *rec);
bool btf_type_is_void(const struct btf_type *t);
s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+s32 bpf_find_btf_id(const char *name, u32 kind, struct btf **btf_p);
const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
u32 id, u32 *res_id);
const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index a3462a9b8e18..a9cb10b0e2e9 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -49,7 +49,7 @@ word \
____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 6cb3e9af78c9..44e9de51eedf 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -16,8 +16,6 @@
#include <linux/wait.h>
#include <linux/atomic.h>
-#ifdef CONFIG_BLOCK
-
enum bh_state_bits {
BH_Uptodate, /* Contains valid data */
BH_Dirty, /* Is dirty */
@@ -173,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -194,11 +195,8 @@ void buffer_check_dirty_writeback(struct folio *folio,
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
-void set_bh_page(struct buffer_head *bh,
- struct page *page, unsigned long offset);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset);
-bool try_to_free_buffers(struct folio *);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
bool retry);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
@@ -213,10 +211,6 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
-int inode_has_buffers(struct inode *);
-void invalidate_inode_buffers(struct inode *);
-int remove_inode_buffers(struct inode *inode);
-int sync_mapping_buffers(struct address_space *mapping);
int generic_buffers_fsync_noflush(struct file *file, loff_t start, loff_t end,
bool datasync);
int generic_buffers_fsync(struct file *file, loff_t start, loff_t end,
@@ -240,9 +234,6 @@ void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
sector_t block, unsigned size, gfp_t gfp);
-void invalidate_bh_lrus(void);
-void invalidate_bh_lrus_cpu(void);
-bool has_bh_in_lru(int cpu, void *dummy);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
@@ -258,8 +249,6 @@ int __bh_read(struct buffer_head *bh, blk_opf_t op_flags, bool wait);
void __bh_read_batch(int nr, struct buffer_head *bhs[],
blk_opf_t op_flags, bool force_lock);
-extern int buffer_heads_over_limit;
-
/*
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
@@ -288,21 +277,9 @@ int cont_write_begin(struct file *, struct address_space *, loff_t,
unsigned, struct page **, void **,
get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
-int block_commit_write(struct page *page, unsigned from, unsigned to);
+void block_commit_write(struct page *page, unsigned int from, unsigned int to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
get_block_t get_block);
-/* Convert errno to return value from ->page_mkwrite() call */
-static inline vm_fault_t block_page_mkwrite_return(int err)
-{
- if (err == 0)
- return VM_FAULT_LOCKED;
- if (err == -EFAULT || err == -EAGAIN)
- return VM_FAULT_NOPAGE;
- if (err == -ENOMEM)
- return VM_FAULT_OOM;
- /* -ENOSPC, -EDQUOT, -EIO ... */
- return VM_FAULT_SIGBUS;
-}
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
@@ -316,8 +293,6 @@ extern int buffer_migrate_folio_norefs(struct address_space *,
#define buffer_migrate_folio_norefs NULL
#endif
-void buffer_init(void);
-
/*
* inline definitions
*/
@@ -477,7 +452,20 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
-#else /* CONFIG_BLOCK */
+#ifdef CONFIG_BUFFER_HEAD
+
+void buffer_init(void);
+bool try_to_free_buffers(struct folio *folio);
+int inode_has_buffers(struct inode *inode);
+void invalidate_inode_buffers(struct inode *inode);
+int remove_inode_buffers(struct inode *inode);
+int sync_mapping_buffers(struct address_space *mapping);
+void invalidate_bh_lrus(void);
+void invalidate_bh_lrus_cpu(void);
+bool has_bh_in_lru(int cpu, void *dummy);
+extern int buffer_heads_over_limit;
+
+#else /* CONFIG_BUFFER_HEAD */
static inline void buffer_init(void) {}
static inline bool try_to_free_buffers(struct folio *folio) { return true; }
@@ -485,9 +473,10 @@ static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }
+static inline void invalidate_bh_lrus(void) {}
static inline void invalidate_bh_lrus_cpu(void) {}
static inline bool has_bh_in_lru(int cpu, void *dummy) { return false; }
#define buffer_heads_over_limit 0
-#endif /* CONFIG_BLOCK */
+#endif /* CONFIG_BUFFER_HEAD */
#endif /* _LINUX_BUFFER_HEAD_H */
diff --git a/include/linux/cacheflush.h b/include/linux/cacheflush.h
index a6189d21f2ba..55f297b2c23f 100644
--- a/include/linux/cacheflush.h
+++ b/include/linux/cacheflush.h
@@ -7,14 +7,23 @@
struct folio;
#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+#ifndef flush_dcache_folio
void flush_dcache_folio(struct folio *folio);
#endif
#else
static inline void flush_dcache_folio(struct folio *folio)
{
}
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#define flush_dcache_folio flush_dcache_folio
#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+#ifndef flush_icache_pages
+static inline void flush_icache_pages(struct vm_area_struct *vma,
+ struct page *page, unsigned int nr)
+{
+}
+#endif
+
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
+
#endif /* _LINUX_CACHEFLUSH_H */
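
The new fallback follows the usual asm-generic pattern: an architecture that implements the multi-page flush defines the macro before this header is pulled in, otherwise the no-op stub is used and flush_icache_page() maps to a single-page call. A sketch of the arch-side override; the function name is hypothetical:

/* In a hypothetical <asm/cacheflush.h>: */
void my_arch_flush_icache_pages(struct vm_area_struct *vma,
				struct page *page, unsigned int nr);
#define flush_icache_pages my_arch_flush_icache_pages
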
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 49586ff26152..5f2301ee88bc 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -359,14 +359,19 @@ enum {
extern const char *ceph_mds_op_name(int op);
-
-#define CEPH_SETATTR_MODE 1
-#define CEPH_SETATTR_UID 2
-#define CEPH_SETATTR_GID 4
-#define CEPH_SETATTR_MTIME 8
-#define CEPH_SETATTR_ATIME 16
-#define CEPH_SETATTR_SIZE 32
-#define CEPH_SETATTR_CTIME 64
+#define CEPH_SETATTR_MODE (1 << 0)
+#define CEPH_SETATTR_UID (1 << 1)
+#define CEPH_SETATTR_GID (1 << 2)
+#define CEPH_SETATTR_MTIME (1 << 3)
+#define CEPH_SETATTR_ATIME (1 << 4)
+#define CEPH_SETATTR_SIZE (1 << 5)
+#define CEPH_SETATTR_CTIME (1 << 6)
+#define CEPH_SETATTR_MTIME_NOW (1 << 7)
+#define CEPH_SETATTR_ATIME_NOW (1 << 8)
+#define CEPH_SETATTR_BTIME (1 << 9)
+#define CEPH_SETATTR_KILL_SGUID (1 << 10)
+#define CEPH_SETATTR_FSCRYPT_AUTH (1 << 11)
+#define CEPH_SETATTR_FSCRYPT_FILE (1 << 12)
/*
* Ceph setxattr request flags.
@@ -462,24 +467,26 @@ union ceph_mds_request_args {
} __attribute__ ((packed));
union ceph_mds_request_args_ext {
- union ceph_mds_request_args old;
- struct {
- __le32 mode;
- __le32 uid;
- __le32 gid;
- struct ceph_timespec mtime;
- struct ceph_timespec atime;
- __le64 size, old_size; /* old_size needed by truncate */
- __le32 mask; /* CEPH_SETATTR_* */
- struct ceph_timespec btime;
- } __attribute__ ((packed)) setattr_ext;
+ union {
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
+ };
};
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
#define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */
#define CEPH_MDS_FLAG_ASYNC 4 /* request is asynchronous */
-struct ceph_mds_request_head_old {
+struct ceph_mds_request_head_legacy {
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
__le32 flags; /* CEPH_MDS_FLAG_* */
@@ -492,9 +499,9 @@ struct ceph_mds_request_head_old {
union ceph_mds_request_args args;
} __attribute__ ((packed));
-#define CEPH_MDS_REQUEST_HEAD_VERSION 1
+#define CEPH_MDS_REQUEST_HEAD_VERSION 2
-struct ceph_mds_request_head {
+struct ceph_mds_request_head_old {
__le16 version; /* struct version */
__le64 oldest_client_tid;
__le32 mdsmap_epoch; /* on client */
@@ -508,6 +515,23 @@ struct ceph_mds_request_head {
union ceph_mds_request_args_ext args;
} __attribute__ ((packed));
+struct ceph_mds_request_head {
+ __le16 version; /* struct version */
+ __le64 oldest_client_tid;
+ __le32 mdsmap_epoch; /* on client */
+ __le32 flags; /* CEPH_MDS_FLAG_* */
+ __u8 num_retry, num_fwd; /* legacy count retry and fwd attempts */
+ __le16 num_releases; /* # include cap/lease release records */
+ __le32 op; /* mds op code */
+ __le32 caller_uid, caller_gid;
+ __le64 ino; /* use this ino for openc, mkdir, mknod,
+ etc. (if replaying) */
+ union ceph_mds_request_args_ext args;
+
+ __le32 ext_num_retry; /* new count retry attempts */
+ __le32 ext_num_fwd; /* new count fwd attempts */
+} __attribute__ ((packed));
+
/* cap/lease release record */
struct ceph_mds_request_release {
__le64 ino, cap_id; /* ino and unique cap id */
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 99c1726be6ee..2eaaabbe98cb 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -17,6 +17,7 @@
struct ceph_msg;
struct ceph_connection;
+struct ceph_msg_data_cursor;
/*
* Ceph defines these callbacks for handling connection events.
@@ -70,6 +71,30 @@ struct ceph_connection_operations {
int used_proto, int result,
const int *allowed_protos, int proto_cnt,
const int *allowed_modes, int mode_cnt);
+
+ /**
+ * sparse_read: read sparse data
+ * @con: connection we're reading from
+ * @cursor: data cursor for reading extents
+ * @buf: optional buffer to read into
+ *
+ * This should be called more than once, each time setting up to
+ * receive an extent into the current cursor position, and zeroing
+ * the holes between them.
+ *
+ * Returns amount of data to be read (in bytes), 0 if reading is
+ * complete, or -errno if there was an error.
+ *
+ * If @buf is set on a >0 return, then the data should be read into
+ * the provided buffer. Otherwise, it should be read into the cursor.
+ *
+ * The sparse read operation is expected to initialize the cursor
+ * with a length covering up to the end of the last extent.
+ */
+ int (*sparse_read)(struct ceph_connection *con,
+ struct ceph_msg_data_cursor *cursor,
+ char **buf);
+
};
/* use format string %s%lld */
@@ -98,6 +123,7 @@ enum ceph_msg_data_type {
CEPH_MSG_DATA_BIO, /* data source/destination is a bio list */
#endif /* CONFIG_BLOCK */
CEPH_MSG_DATA_BVECS, /* data source/destination is a bio_vec array */
+ CEPH_MSG_DATA_ITER, /* data source/destination is an iov_iter */
};
#ifdef CONFIG_BLOCK
@@ -199,6 +225,7 @@ struct ceph_msg_data {
bool own_pages;
};
struct ceph_pagelist *pagelist;
+ struct iov_iter iter;
};
};
@@ -207,6 +234,7 @@ struct ceph_msg_data_cursor {
struct ceph_msg_data *data; /* current data item */
size_t resid; /* bytes not yet consumed */
+ int sr_resid; /* residual sparse_read len */
bool need_crc; /* crc update needed */
union {
#ifdef CONFIG_BLOCK
@@ -222,6 +250,10 @@ struct ceph_msg_data_cursor {
struct page *page; /* page from list */
size_t offset; /* bytes from list */
};
+ struct {
+ struct iov_iter iov_iter;
+ unsigned int lastlen;
+ };
};
};
@@ -251,6 +283,7 @@ struct ceph_msg {
struct kref kref;
bool more_to_follow;
bool needs_out_seq;
+ bool sparse_read;
int front_alloc_len;
struct ceph_msgpool *pool;
@@ -309,6 +342,10 @@ struct ceph_connection_v1_info {
int in_base_pos; /* bytes read */
+ /* sparse reads */
+ struct kvec in_sr_kvec; /* current location to receive into */
+ u64 in_sr_len; /* amount of data in this extent */
+
/* message in temps */
u8 in_tag; /* protocol control byte */
struct ceph_msg_header in_hdr;
@@ -395,6 +432,7 @@ struct ceph_connection_v2_info {
void *conn_bufs[16];
int conn_buf_cnt;
+ int data_len_remain;
struct kvec in_sign_kvecs[8];
struct kvec out_sign_kvecs[8];
@@ -573,6 +611,8 @@ void ceph_msg_data_add_bio(struct ceph_msg *msg, struct ceph_bio_iter *bio_pos,
#endif /* CONFIG_BLOCK */
void ceph_msg_data_add_bvecs(struct ceph_msg *msg,
struct ceph_bvec_iter *bvec_pos);
+void ceph_msg_data_add_iter(struct ceph_msg *msg,
+ struct iov_iter *iter);
struct ceph_msg *ceph_msg_new2(int type, int front_len, int max_data_items,
gfp_t flags, bool can_fail);
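
A hedged sketch of how a messenger implementation might drive the new ->sparse_read() callback; the loop and the receive_bytes() helper are illustrative, not the real net/ceph code:

static int example_drive_sparse_read(struct ceph_connection *con,
				     struct ceph_msg_data_cursor *cursor)
{
	char *buf;
	int len;

	while ((len = con->ops->sparse_read(con, cursor, &buf)) > 0) {
		/*
		 * len > 0: receive that many bytes, either into buf (if the
		 * callback set it) or into the cursor's current position.
		 */
		if (receive_bytes(con, cursor, buf, len))
			return -EIO;
	}

	return len;	/* 0 when the read is complete, -errno on error */
}
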
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index fb6be72104df..bf9823956758 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -29,14 +29,62 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
#define CEPH_HOMELESS_OSD -1
-/* a given osd we're communicating with */
+/*
+ * A single extent in a SPARSE_READ reply.
+ *
+ * Note that these come from the OSD as little-endian values. On BE arches,
+ * we convert them in-place after receipt.
+ */
+struct ceph_sparse_extent {
+ u64 off;
+ u64 len;
+} __packed;
+
+/* Sparse read state machine state values */
+enum ceph_sparse_read_state {
+ CEPH_SPARSE_READ_HDR = 0,
+ CEPH_SPARSE_READ_EXTENTS,
+ CEPH_SPARSE_READ_DATA_LEN,
+ CEPH_SPARSE_READ_DATA,
+};
+
+/*
+ * A SPARSE_READ reply is a 32-bit count of extents, followed by an array of
+ * 64-bit offset/length pairs, and then all of the actual file data
+ * concatenated after it (sans holes).
+ *
+ * Unfortunately, we don't know how long the extent array is until we've
+ * started reading the data section of the reply. The caller should send down
+ * a destination buffer for the array, but we'll alloc one if it's too small
+ * or if the caller doesn't.
+ */
+struct ceph_sparse_read {
+ enum ceph_sparse_read_state sr_state; /* state machine state */
+ u64 sr_req_off; /* orig request offset */
+ u64 sr_req_len; /* orig request length */
+ u64 sr_pos; /* current pos in buffer */
+ int sr_index; /* current extent index */
+ __le32 sr_datalen; /* length of actual data */
+ u32 sr_count; /* extent count in reply */
+ int sr_ext_len; /* length of extent array */
+ struct ceph_sparse_extent *sr_extent; /* extent array */
+};
+
+/*
+ * A given osd we're communicating with.
+ *
+ * Note that the o_requests tree can be searched while holding the "lock" mutex
+ * or the "o_requests_lock" spinlock. Insertion or removal requires both!
+ */
struct ceph_osd {
refcount_t o_ref;
+ int o_sparse_op_idx;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
struct rb_node o_node;
struct ceph_connection o_con;
+ spinlock_t o_requests_lock;
struct rb_root o_requests;
struct rb_root o_linger_requests;
struct rb_root o_backoff_mappings;
@@ -46,6 +94,7 @@ struct ceph_osd {
unsigned long lru_ttl;
struct list_head o_keepalive_item;
struct mutex lock;
+ struct ceph_sparse_read o_sparse_read;
};
#define CEPH_OSD_SLAB_OPS 2
@@ -59,6 +108,7 @@ enum ceph_osd_data_type {
CEPH_OSD_DATA_TYPE_BIO,
#endif /* CONFIG_BLOCK */
CEPH_OSD_DATA_TYPE_BVECS,
+ CEPH_OSD_DATA_TYPE_ITER,
};
struct ceph_osd_data {
@@ -82,6 +132,7 @@ struct ceph_osd_data {
struct ceph_bvec_iter bvec_pos;
u32 num_bvecs;
};
+ struct iov_iter iter;
};
};
@@ -98,6 +149,8 @@ struct ceph_osd_req_op {
u64 offset, length;
u64 truncate_size;
u32 truncate_seq;
+ int sparse_ext_cnt;
+ struct ceph_sparse_extent *sparse_ext;
struct ceph_osd_data osd_data;
} extent;
struct {
@@ -145,6 +198,9 @@ struct ceph_osd_req_op {
u32 src_fadvise_flags;
struct ceph_osd_data osd_data;
} copy_from;
+ struct {
+ u64 ver;
+ } assert_ver;
};
};
@@ -199,6 +255,7 @@ struct ceph_osd_request {
struct ceph_osd_client *r_osdc;
struct kref r_kref;
bool r_mempool;
+ bool r_linger; /* don't resend on failure */
struct completion r_completion; /* private to osd_client.c */
ceph_osdc_callback_t r_callback;
@@ -211,9 +268,9 @@ struct ceph_osd_request {
struct ceph_snap_context *r_snapc; /* for writes */
struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
- bool r_linger; /* don't resend on failure */
/* internal */
+ u64 r_version; /* data version sent in reply */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
ktime_t r_start_latency; /* ktime_t */
@@ -450,6 +507,8 @@ void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
unsigned int which,
struct ceph_bvec_iter *bvec_pos);
+void osd_req_op_extent_osd_iter(struct ceph_osd_request *osd_req,
+ unsigned int which, struct iov_iter *iter);
extern void osd_req_op_cls_request_data_pagelist(struct ceph_osd_request *,
unsigned int which,
@@ -504,6 +563,20 @@ extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *,
u32 truncate_seq, u64 truncate_size,
bool use_mempool);
+int __ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op, int cnt);
+
+/*
+ * How big an extent array should we preallocate for a sparse read? This is
+ * just a starting value. If we get more than this back from the OSD, the
+ * receiver will reallocate.
+ */
+#define CEPH_SPARSE_EXT_ARRAY_INITIAL 16
+
+static inline int ceph_alloc_sparse_ext_map(struct ceph_osd_req_op *op)
+{
+ return __ceph_alloc_sparse_ext_map(op, CEPH_SPARSE_EXT_ARRAY_INITIAL);
+}
+
extern void ceph_osdc_get_request(struct ceph_osd_request *req);
extern void ceph_osdc_put_request(struct ceph_osd_request *req);
@@ -558,5 +631,19 @@ int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
struct ceph_watch_item **watchers,
u32 *num_watchers);
-#endif
+/* Find offset into the buffer of the end of the extent map */
+static inline u64 ceph_sparse_ext_map_end(struct ceph_osd_req_op *op)
+{
+ struct ceph_sparse_extent *ext;
+
+ /* No extents? No data */
+ if (op->extent.sparse_ext_cnt == 0)
+ return 0;
+
+ ext = &op->extent.sparse_ext[op->extent.sparse_ext_cnt - 1];
+
+ return ext->off + ext->len - op->extent.offset;
+}
+
+#endif
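
A hedged sketch of the caller side: preallocate the extent array for a sparse-read op and, once the reply is decoded, use ceph_sparse_ext_map_end() to see how far into the destination buffer the extents reach. The wrapper function is hypothetical; the helpers are the ones added above:

static int example_setup_sparse_read(struct ceph_osd_request *req, int which)
{
	struct ceph_osd_req_op *op = &req->r_ops[which];

	/*
	 * Start with CEPH_SPARSE_EXT_ARRAY_INITIAL extent slots; the
	 * receive path reallocates if the OSD returns more extents.
	 */
	return ceph_alloc_sparse_ext_map(op);
}

/*
 * After completion, ceph_sparse_ext_map_end(op) gives the offset into the
 * destination buffer just past the last returned extent.
 */
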
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 43a7a1573b51..73c3efbec36c 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -524,6 +524,10 @@ struct ceph_osd_op {
__le64 cookie;
} __attribute__ ((packed)) notify;
struct {
+ __le64 unused;
+ __le64 ver;
+ } __attribute__ ((packed)) assert_ver;
+ struct {
__le64 offset, length;
__le64 src_offset;
} __attribute__ ((packed)) clonerange;
diff --git a/include/linux/cfi.h b/include/linux/cfi.h
index 5e134f4ce8b7..3552ec82b725 100644
--- a/include/linux/cfi.h
+++ b/include/linux/cfi.h
@@ -19,11 +19,13 @@ static inline enum bug_trap_type report_cfi_failure_noaddr(struct pt_regs *regs,
{
return report_cfi_failure(regs, addr, NULL, 0);
}
+#endif /* CONFIG_CFI_CLANG */
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
bool is_cfi_trap(unsigned long addr);
+#else
+static inline bool is_cfi_trap(unsigned long addr) { return false; }
#endif
-#endif /* CONFIG_CFI_CLANG */
#ifdef CONFIG_MODULES
#ifdef CONFIG_ARCH_USES_CFI_TRAPS
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index ae20dbb885d6..f1b3151ac30b 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -342,6 +342,20 @@ struct cgroup_rstat_cpu {
struct cgroup_base_stat last_bstat;
/*
+ * This field is used to record the cumulative per-cpu time of
+ * the cgroup and its descendants. Currently it can be read via
+ * eBPF/drgn etc, and we are still trying to determine how to
+ * expose it in the cgroupfs interface.
+ */
+ struct cgroup_base_stat subtree_bstat;
+
+ /*
+ * Snapshots at the last reading. These are used to calculate the
+ * deltas to propagate to the per-cpu subtree_bstat.
+ */
+ struct cgroup_base_stat last_subtree_bstat;
+
+ /*
* Child cgroups with stat updates on this cpu since the last read
* are linked on the parent's ->updated_children through
* ->updated_next.
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 0f0cd01906b4..ec32ec58c59f 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -1379,7 +1379,7 @@ struct clk_onecell_data {
struct clk_hw_onecell_data {
unsigned int num;
- struct clk_hw *hws[];
+ struct clk_hw *hws[] __counted_by(num);
};
#define CLK_OF_DECLARE(name, compat, fn) \
diff --git a/include/linux/clk/mmp.h b/include/linux/clk/mmp.h
deleted file mode 100644
index 445130460380..000000000000
--- a/include/linux/clk/mmp.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __CLK_MMP_H
-#define __CLK_MMP_H
-
-#include <linux/types.h>
-
-extern void pxa168_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-extern void pxa910_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys,
- phys_addr_t apbcp_phys);
-extern void mmp2_clk_init(phys_addr_t mpmu_phys,
- phys_addr_t apmu_phys,
- phys_addr_t apbc_phys);
-
-#endif
diff --git a/include/linux/console.h b/include/linux/console.h
index d3195664baa5..7de11c763eb3 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -154,6 +154,8 @@ static inline int con_debug_leave(void)
* receiving the printk spam for obvious reasons.
* @CON_EXTENDED: The console supports the extended output format of
* /dev/kmesg which requires a larger output buffer.
+ * @CON_SUSPENDED: Indicates if a console is suspended. If true, the
+ * printing callbacks must not be called.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -163,6 +165,7 @@ enum cons_flags {
CON_ANYTIME = BIT(4),
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
+ CON_SUSPENDED = BIT(7),
};
/**
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index bf70987240e4..a269fffaf991 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -6,6 +6,8 @@
#ifndef _LINUX_CORESIGHT_H
#define _LINUX_CORESIGHT_H
+#include <linux/amba/bus.h>
+#include <linux/clk.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/perf_event.h>
@@ -386,6 +388,63 @@ static inline u32 csdev_access_relaxed_read32(struct csdev_access *csa,
return csa->read(offset, true, false);
}
+#define CORESIGHT_CIDRn(i) (0xFF0 + ((i) * 4))
+
+static inline u32 coresight_get_cid(void __iomem *base)
+{
+ u32 i, cid = 0;
+
+ for (i = 0; i < 4; i++)
+ cid |= readl(base + CORESIGHT_CIDRn(i)) << (i * 8);
+
+ return cid;
+}
+
+static inline bool is_coresight_device(void __iomem *base)
+{
+ u32 cid = coresight_get_cid(base);
+
+ return cid == CORESIGHT_CID;
+}
+
+/*
+ * Attempt to find and enable "APB clock" for the given device
+ *
+ * Returns:
+ *
+ * clk - Clock is found and enabled
+ * NULL - clock is not found
+ * ERROR - Clock is found but failed to enable
+ */
+static inline struct clk *coresight_get_enable_apb_pclk(struct device *dev)
+{
+ struct clk *pclk;
+ int ret;
+
+ pclk = clk_get(dev, "apb_pclk");
+ if (IS_ERR(pclk))
+ return NULL;
+
+ ret = clk_prepare_enable(pclk);
+ if (ret) {
+ clk_put(pclk);
+ return ERR_PTR(ret);
+ }
+ return pclk;
+}
+
+#define CORESIGHT_PIDRn(i) (0xFE0 + ((i) * 4))
+
+static inline u32 coresight_get_pid(struct csdev_access *csa)
+{
+ u32 i, pid = 0;
+
+ for (i = 0; i < 4; i++)
+ pid |= csdev_access_relaxed_read32(csa, CORESIGHT_PIDRn(i)) << (i * 8);
+
+ return pid;
+}
+
static inline u64 csdev_access_relaxed_read_pair(struct csdev_access *csa,
u32 lo_offset, u32 hi_offset)
{
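
A hedged usage sketch for coresight_get_enable_apb_pclk(), following its three-way return contract; the probe function is hypothetical:

static int example_coresight_probe(struct device *dev)
{
	struct clk *pclk;

	pclk = coresight_get_enable_apb_pclk(dev);
	if (IS_ERR(pclk))
		return PTR_ERR(pclk);	/* clock found but enabling failed */

	/* pclk == NULL just means no "apb_pclk" is described for the device */
	return 0;
}
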
diff --git a/include/linux/counter.h b/include/linux/counter.h
index b63746637de2..702e9108bbb4 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -399,7 +399,7 @@ struct counter_device {
struct mutex ops_exist_lock;
};
-void *counter_priv(const struct counter_device *const counter);
+void *counter_priv(const struct counter_device *const counter) __attribute_const__;
struct counter_device *counter_alloc(size_t sizeof_priv);
void counter_put(struct counter_device *const counter);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 43b363a99215..71d186d6933a 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -141,6 +141,9 @@ struct cpufreq_policy {
*/
bool dvfs_possible_from_any_cpu;
+ /* Per policy boost enabled flag. */
+ bool boost_enabled;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
unsigned int cached_resolved_idx;
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index de62a722431e..0c06561bf5ff 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -28,6 +28,8 @@
VMCOREINFO_BYTES)
typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+/* Per cpu memory for storing cpu states in case of system crash. */
+extern note_buf_t __percpu *crash_notes;
void crash_update_vmcoreinfo_safecopy(void *ptr);
void crash_save_vmcoreinfo(void);
@@ -84,4 +86,29 @@ int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
+/* Alignment required for elf header segment */
+#define ELF_CORE_HEADER_ALIGN 4096
+
+struct crash_mem {
+ unsigned int max_nr_ranges;
+ unsigned int nr_ranges;
+ struct range ranges[];
+};
+
+extern int crash_exclude_mem_range(struct crash_mem *mem,
+ unsigned long long mstart,
+ unsigned long long mend);
+extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
+ void **addr, unsigned long *sz);
+
+struct kimage;
+struct kexec_segment;
+
+#define KEXEC_CRASH_HP_NONE 0
+#define KEXEC_CRASH_HP_ADD_CPU 1
+#define KEXEC_CRASH_HP_REMOVE_CPU 2
+#define KEXEC_CRASH_HP_ADD_MEMORY 3
+#define KEXEC_CRASH_HP_REMOVE_MEMORY 4
+#define KEXEC_CRASH_HP_INVALID_CPU -1U
+
#endif /* LINUX_CRASH_CORE_H */
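
A hedged sketch of building a &struct crash_mem description and carving the crash-kernel reservation out of it; the function and the 4 GiB figure are illustrative only, and kzalloc/struct_size/SZ_4G/crashk_res are assumed to come from their usual headers:

static int example_build_crash_ranges(void)
{
	struct crash_mem *cmem;
	int ret;

	cmem = kzalloc(struct_size(cmem, ranges, 2), GFP_KERNEL);
	if (!cmem)
		return -ENOMEM;

	cmem->max_nr_ranges = 2;	/* excluding a range may split one into two */
	cmem->nr_ranges = 1;
	cmem->ranges[0].start = 0;
	cmem->ranges[0].end = SZ_4G - 1;

	ret = crash_exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	kfree(cmem);
	return ret;
}
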
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 9ed9232af934..f923528d5cc4 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -164,7 +164,6 @@ extern void abort_creds(struct cred *);
extern const struct cred *override_creds(const struct cred *);
extern void revert_creds(const struct cred *);
extern struct cred *prepare_kernel_cred(struct task_struct *);
-extern int change_create_files_as(struct cred *, struct inode *);
extern int set_security_override(struct cred *, u32);
extern int set_security_override_from_ctx(struct cred *, const char *);
extern int set_create_files_as(struct cred *, struct inode *);
diff --git a/include/linux/damon.h b/include/linux/damon.h
index d5d4d19928e0..ae2664d1d5f1 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -226,16 +226,26 @@ struct damos_stat {
* enum damos_filter_type - Type of memory for &struct damos_filter
* @DAMOS_FILTER_TYPE_ANON: Anonymous pages.
* @DAMOS_FILTER_TYPE_MEMCG: Specific memcg's pages.
+ * @DAMOS_FILTER_TYPE_ADDR: Address range.
+ * @DAMOS_FILTER_TYPE_TARGET: Data Access Monitoring target.
* @NR_DAMOS_FILTER_TYPES: Number of filter types.
*
- * The support of each filter type is up to running &struct damon_operations.
- * &enum DAMON_OPS_PADDR is supporting all filter types, while
- * &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR are not supporting any
- * filter types.
+ * The anon pages type and memcg type filters are handled by the underlying
+ * &struct damon_operations as part of applying the scheme's action, and are
+ * therefore accounted as 'tried'. In contrast, other types are handled by the
+ * core layer before the action is tried and are therefore not accounted as
+ * 'tried'.
+ *
+ * Support for the filters handled by &struct damon_operations depends on the
+ * running &struct damon_operations.
+ * &enum DAMON_OPS_PADDR supports both anon pages type and memcg type filters,
+ * while &enum DAMON_OPS_VADDR and &enum DAMON_OPS_FVADDR don't support any of
+ * the two types.
*/
enum damos_filter_type {
DAMOS_FILTER_TYPE_ANON,
DAMOS_FILTER_TYPE_MEMCG,
+ DAMOS_FILTER_TYPE_ADDR,
+ DAMOS_FILTER_TYPE_TARGET,
NR_DAMOS_FILTER_TYPES,
};
@@ -244,18 +254,24 @@ enum damos_filter_type {
* @type: Type of the page.
 * @matching: If the matching page should be filtered out or in.
* @memcg_id: Memcg id of the question if @type is DAMOS_FILTER_MEMCG.
+ * @addr_range: Address range if @type is DAMOS_FILTER_TYPE_ADDR.
+ * @target_idx: Index of the &struct damon_target of
+ * &damon_ctx->adaptive_targets if @type is
+ * DAMOS_FILTER_TYPE_TARGET.
* @list: List head for siblings.
*
* Before applying the &damos->action to a memory region, DAMOS checks if each
 * page of the region matches this and avoids applying the action if so.
- * Note that the check support is up to &struct damon_operations
- * implementation.
+ * Support of each filter type depends on the running &struct damon_operations
+ * and the type. Refer to &enum damos_filter_type for more detail.
*/
struct damos_filter {
enum damos_filter_type type;
bool matching;
union {
unsigned short memcg_id;
+ struct damon_addr_range addr_range;
+ int target_idx;
};
struct list_head list;
};
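
A hedged sketch of installing the new address-range filter type on a scheme, using the existing damos_new_filter()/damos_add_filter() helpers; the wrapper function is illustrative:

static int example_filter_out_range(struct damos *scheme,
				    unsigned long start, unsigned long end)
{
	struct damos_filter *filter;

	/* matching == true: pages inside the range are filtered out */
	filter = damos_new_filter(DAMOS_FILTER_TYPE_ADDR, true);
	if (!filter)
		return -ENOMEM;

	filter->addr_range.start = start;
	filter->addr_range.end = end;
	damos_add_filter(scheme, filter);	/* handled by the core layer */
	return 0;
}
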
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 261944ec0887..22cd9902345d 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -241,10 +241,10 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, unsigned int order,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
- enum page_entry_size pe_size, pfn_t pfn);
+ unsigned int order, pfn_t pfn);
int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index);
int dax_invalidate_mapping_entry_sync(struct address_space *mapping,
pgoff_t index);
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
index 8904063d4c9f..6bfe70decc9f 100644
--- a/include/linux/dev_printk.h
+++ b/include/linux/dev_printk.h
@@ -274,4 +274,6 @@ do { \
WARN_ONCE(condition, "%s %s: " format, \
dev_driver_string(dev), dev_name(dev), ## arg)
+__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
+
#endif /* _DEVICE_PRINTK_H_ */
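
dev_err_probe() moves here from device.h so that users of dev_printk.h alone can call it. A typical probe-path use, as a sketch; the clock name is illustrative:

static int example_driver_probe(struct platform_device *pdev)
{
	struct clk *clk;

	clk = devm_clk_get(&pdev->dev, "core");
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get core clock\n");

	return 0;
}
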
diff --git a/include/linux/device.h b/include/linux/device.h
index bbaeabd04b0d..56d93a1ffb7b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -349,6 +349,7 @@ unsigned long devm_get_free_pages(struct device *dev,
gfp_t gfp_mask, unsigned int order);
void devm_free_pages(struct device *dev, unsigned long addr);
+#ifdef CONFIG_HAS_IOMEM
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
void __iomem *devm_ioremap_resource_wc(struct device *dev,
@@ -357,6 +358,31 @@ void __iomem *devm_ioremap_resource_wc(struct device *dev,
void __iomem *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
resource_size_t *size);
+#else
+
+static inline
+void __iomem *devm_ioremap_resource(struct device *dev,
+ const struct resource *res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline
+void __iomem *devm_of_iomap(struct device *dev,
+ struct device_node *node, int index,
+ resource_size_t *size)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif
/* allows to add/remove a custom action to devres stack */
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
@@ -625,7 +651,10 @@ struct device_physical_location {
* @dma_pools: Dma pools (if dma'ble device).
* @dma_mem: Internal for coherent mem override.
* @cma_area: Contiguous memory area for dma allocations
- * @dma_io_tlb_mem: Pointer to the swiotlb pool used. Not for driver use.
+ * @dma_io_tlb_mem: Software IO TLB allocator. Not for driver use.
+ * @dma_io_tlb_pools: List of transient swiotlb memory pools.
+ * @dma_io_tlb_lock: Protects changes to the list of active pools.
+ * @dma_uses_io_tlb: %true if device has used the software IO TLB.
* @archdata: For arch-specific additions.
* @of_node: Associated device tree node.
* @fwnode: Associated device node supplied by platform firmware.
@@ -732,6 +761,11 @@ struct device {
#ifdef CONFIG_SWIOTLB
struct io_tlb_mem *dma_io_tlb_mem;
#endif
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head dma_io_tlb_pools;
+ spinlock_t dma_io_tlb_lock;
+ bool dma_uses_io_tlb;
+#endif
/* arch specific additions */
struct dev_archdata archdata;
@@ -1215,8 +1249,6 @@ void device_link_remove(void *consumer, struct device *supplier);
void device_links_supplier_sync_state_pause(void);
void device_links_supplier_sync_state_resume(void);
-__printf(3, 4) int dev_err_probe(const struct device *dev, int err, const char *fmt, ...);
-
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor))
diff --git a/include/linux/dlm_plock.h b/include/linux/dlm_plock.h
index e6d76e8715a6..15fc856d198c 100644
--- a/include/linux/dlm_plock.h
+++ b/include/linux/dlm_plock.h
@@ -11,6 +11,8 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
int cmd, struct file_lock *fl);
int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
+int dlm_posix_cancel(dlm_lockspace_t *lockspace, u64 number, struct file *file,
+ struct file_lock *fl);
int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
struct file_lock *fl);
#endif
diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h
index 9bf19b5bf755..f2fc203fb8a1 100644
--- a/include/linux/dma-map-ops.h
+++ b/include/linux/dma-map-ops.h
@@ -169,12 +169,6 @@ static inline void dma_free_contiguous(struct device *dev, struct page *page,
}
#endif /* CONFIG_DMA_CMA*/
-#ifdef CONFIG_DMA_PERNUMA_CMA
-void dma_pernuma_cma_reserve(void);
-#else
-static inline void dma_pernuma_cma_reserve(void) { }
-#endif /* CONFIG_DMA_PERNUMA_CMA */
-
#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
dma_addr_t device_addr, size_t size);
@@ -343,6 +337,12 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
+#ifdef CONFIG_ARCH_HAS_DMA_SET_MASK
+void arch_dma_set_mask(struct device *dev, u64 mask);
+#else
+#define arch_dma_set_mask(dev, mask) do { } while (0)
+#endif
+
#ifdef CONFIG_MMU
/*
* Page protection so that devices that can't snoop CPU caches can use the
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index e13050eb9777..f0ccca16a0ac 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -418,6 +418,8 @@ static inline void dma_sync_sgtable_for_device(struct device *dev,
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
+bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size);
+
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t gfp)
{
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 27dbd4c64860..e34b601b71fd 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -106,8 +106,6 @@ static inline bool dmar_rcu_check(void)
extern int dmar_table_init(void);
extern int dmar_dev_scope_init(void);
extern void dmar_register_bus_notifier(void);
-extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
- struct dmar_dev_scope **devices, u16 segment);
extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
extern void dmar_free_dev_scope(struct dmar_dev_scope **devices, int *cnt);
extern int dmar_insert_dev_scope(struct dmar_pci_notify_info *info,
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h
index 061dd84d09f3..4fcbf4d4fd0a 100644
--- a/include/linux/dynamic_debug.h
+++ b/include/linux/dynamic_debug.h
@@ -37,10 +37,12 @@ struct _ddebug {
#define _DPRINTK_FLAGS_INCL_FUNCNAME (1<<2)
#define _DPRINTK_FLAGS_INCL_LINENO (1<<3)
#define _DPRINTK_FLAGS_INCL_TID (1<<4)
+#define _DPRINTK_FLAGS_INCL_SOURCENAME (1<<5)
#define _DPRINTK_FLAGS_INCL_ANY \
(_DPRINTK_FLAGS_INCL_MODNAME | _DPRINTK_FLAGS_INCL_FUNCNAME |\
- _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID)
+ _DPRINTK_FLAGS_INCL_LINENO | _DPRINTK_FLAGS_INCL_TID |\
+ _DPRINTK_FLAGS_INCL_SOURCENAME)
#if defined DEBUG
#define _DPRINTK_FLAGS_DEFAULT _DPRINTK_FLAGS_PRINT
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5a1e39df8b26..80b21d1c6eaf 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -24,10 +24,11 @@
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
-#include <linux/screen_info.h>
#include <asm/page.h>
+struct screen_info;
+
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
diff --git a/include/linux/elf-fdpic.h b/include/linux/elf-fdpic.h
index 3bea95a1af53..e533f4513194 100644
--- a/include/linux/elf-fdpic.h
+++ b/include/linux/elf-fdpic.h
@@ -10,13 +10,25 @@
#include <uapi/linux/elf-fdpic.h>
+#if ELF_CLASS == ELFCLASS32
+#define Elf_Sword Elf32_Sword
+#define elf_fdpic_loadseg elf32_fdpic_loadseg
+#define elf_fdpic_loadmap elf32_fdpic_loadmap
+#define ELF_FDPIC_LOADMAP_VERSION ELF32_FDPIC_LOADMAP_VERSION
+#else
+#define Elf_Sword Elf64_Sxword
+#define elf_fdpic_loadmap elf64_fdpic_loadmap
+#define elf_fdpic_loadseg elf64_fdpic_loadseg
+#define ELF_FDPIC_LOADMAP_VERSION ELF64_FDPIC_LOADMAP_VERSION
+#endif
+
/*
* binfmt binary parameters structure
*/
struct elf_fdpic_params {
struct elfhdr hdr; /* ref copy of ELF header */
struct elf_phdr *phdrs; /* ref copy of PT_PHDR table */
- struct elf32_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
+ struct elf_fdpic_loadmap *loadmap; /* loadmap to be passed to userspace */
unsigned long elfhdr_addr; /* mapped ELF header user address */
unsigned long ph_addr; /* mapped PT_PHDR user address */
unsigned long map_addr; /* mapped loadmap user address */
diff --git a/include/linux/evm.h b/include/linux/evm.h
index 7dc1ee74169f..01fc495a83e2 100644
--- a/include/linux/evm.h
+++ b/include/linux/evm.h
@@ -56,9 +56,10 @@ static inline void evm_inode_post_set_acl(struct dentry *dentry,
{
return evm_inode_post_setxattr(dentry, acl_name, NULL, 0);
}
-extern int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm);
+
+int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count);
extern bool evm_revalidate_status(const char *xattr_name);
extern int evm_protected_xattr_if_enabled(const char *req_xattr_name);
extern int evm_read_protected_xattrs(struct dentry *dentry, u8 *buffer,
@@ -157,9 +158,10 @@ static inline void evm_inode_post_set_acl(struct dentry *dentry,
return;
}
-static inline int evm_inode_init_security(struct inode *inode,
- const struct xattr *xattr_array,
- struct xattr *evm)
+static inline int evm_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ struct xattr *xattrs,
+ int *xattr_count)
{
return 0;
}
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index 1c849db953a5..45fca09b2319 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -52,6 +52,8 @@
#ifdef CONFIG_IA64
#define KSYM_FUNC(name) @fptr(name)
+#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
#else
#define KSYM_FUNC(name) name
#endif
diff --git a/include/linux/export.h b/include/linux/export.h
index beed8387e0a4..9911508a9604 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -50,7 +50,7 @@ extern struct module __this_module;
__EXPORT_SYMBOL_REF(sym) ASM_NL \
.previous
-#if !defined(CONFIG_MODULES) || defined(__DISABLE_EXPORTS)
+#if defined(__DISABLE_EXPORTS)
/*
* Allow symbol exports to be disabled completely so that C code may
@@ -75,7 +75,7 @@ extern struct module __this_module;
__ADDRESSABLE(sym) \
asm(__stringify(___EXPORT_SYMBOL(sym, license, ns)))
-#endif /* CONFIG_MODULES */
+#endif
#ifdef DEFAULT_SYMBOL_NAMESPACE
#define _EXPORT_SYMBOL(sym, license) __EXPORT_SYMBOL(sym, license, __stringify(DEFAULT_SYMBOL_NAMESPACE))
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 3c45c3846fe9..e596a0abcb27 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -328,16 +328,4 @@ struct extcon_specific_cable_nb {
struct extcon_dev *edev;
unsigned long previous_value;
};
-
-static inline int extcon_register_interest(struct extcon_specific_cable_nb *obj,
- const char *extcon_name, const char *cable_name,
- struct notifier_block *nb)
-{
- return -EINVAL;
-}
-
-static inline int extcon_unregister_interest(struct extcon_specific_cable_nb *obj)
-{
- return -EINVAL;
-}
#endif /* __LINUX_EXTCON_H__ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ce7d588edc3e..c14576458228 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -383,7 +383,6 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -481,7 +480,9 @@ struct fb_info {
const struct fb_ops *fbops;
struct device *device; /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
+#endif
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
@@ -502,8 +503,6 @@ struct fb_info {
bool skip_vt_switch; /* no VT switch on suspend/resume required */
};
-#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
-
/* This will go away
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
* when it wants to turn the acceleration engine on. This is
@@ -527,7 +526,7 @@ extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
/*
- * Drawing operations where framebuffer is in I/O memory
+ * Helpers for framebuffers in I/O memory
*/
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -538,29 +537,25 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
-/*
- * Initializes struct fb_ops for framebuffers in I/O memory.
- */
-
-#define __FB_DEFAULT_IO_OPS_RDWR \
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
.fb_read = fb_io_read, \
.fb_write = fb_io_write
-#define __FB_DEFAULT_IO_OPS_DRAW \
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
.fb_fillrect = cfb_fillrect, \
.fb_copyarea = cfb_copyarea, \
.fb_imageblit = cfb_imageblit
-#define __FB_DEFAULT_IO_OPS_MMAP \
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
.fb_mmap = NULL /* default implementation */
-#define FB_DEFAULT_IO_OPS \
- __FB_DEFAULT_IO_OPS_RDWR, \
- __FB_DEFAULT_IO_OPS_DRAW, \
- __FB_DEFAULT_IO_OPS_MMAP
+#define FB_DEFAULT_IOMEM_OPS \
+ __FB_DEFAULT_IOMEM_OPS_RDWR, \
+ __FB_DEFAULT_IOMEM_OPS_DRAW, \
+ __FB_DEFAULT_IOMEM_OPS_MMAP
/*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
*/
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -571,28 +566,29 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
/*
- * Initializes struct fb_ops for framebuffers in system memory.
+ * Helpers for framebuffers in DMA-able memory
*/
-#define __FB_DEFAULT_SYS_OPS_RDWR \
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
.fb_read = fb_sys_read, \
.fb_write = fb_sys_write
-#define __FB_DEFAULT_SYS_OPS_DRAW \
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
.fb_fillrect = sys_fillrect, \
.fb_copyarea = sys_copyarea, \
.fb_imageblit = sys_imageblit
-#define __FB_DEFAULT_SYS_OPS_MMAP \
- .fb_mmap = NULL /* default implementation */
-
-#define FB_DEFAULT_SYS_OPS \
- __FB_DEFAULT_SYS_OPS_RDWR, \
- __FB_DEFAULT_SYS_OPS_DRAW, \
- __FB_DEFAULT_SYS_OPS_MMAP
-
-/* drivers/video/fbmem.c */
+/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
@@ -609,7 +605,6 @@ extern int fb_new_modelist(struct fb_info *info);
extern bool fb_center_logo;
extern int fb_logo_count;
-extern struct class *fb_class;
static inline void lock_fb_info(struct fb_info *info)
{
@@ -636,7 +631,7 @@ static inline void __fb_pad_aligned_buffer(u8 *dst, u32 d_pitch,
}
}
-/* drivers/video/fb_defio.c */
+/* fb_defio.c */
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma);
extern int fb_deferred_io_init(struct fb_info *info);
extern void fb_deferred_io_open(struct fb_info *info,
@@ -687,11 +682,11 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
__damage_area(info, image->dx, image->dy, image->width, image->height); \
}
-#define FB_GEN_DEFAULT_DEFERRED_IO_OPS(__prefix, __damage_range, __damage_area) \
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
-#define FB_GEN_DEFAULT_DEFERRED_SYS_OPS(__prefix, __damage_range, __damage_area) \
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
@@ -735,14 +730,11 @@ static inline bool fb_be_math(struct fb_info *info)
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
-/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
-/* drivers/video/fbmon.c */
+/* fbmon.c */
#define FB_MAXTIMINGS 0
#define FB_VSYNCTIMINGS 1
#define FB_HSYNCTIMINGS 2
@@ -776,7 +768,7 @@ extern int of_get_fb_videomode(struct device_node *np,
extern int fb_videomode_from_videomode(const struct videomode *vm,
struct fb_videomode *fbmode);
-/* drivers/video/modedb.c */
+/* modedb.c */
#define VESA_MODEDB_SIZE 43
#define DMT_SIZE 0x50
@@ -802,7 +794,7 @@ extern void fb_videomode_to_modelist(const struct fb_videomode *modedb, int num,
extern const struct fb_videomode *fb_find_best_display(const struct fb_monspecs *specs,
struct list_head *head);
-/* drivers/video/fbcmap.c */
+/* fbcmap.c */
extern int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp);
extern int fb_alloc_cmap_gfp(struct fb_cmap *cmap, int len, int transp, gfp_t flags);
extern void fb_dealloc_cmap(struct fb_cmap *cmap);
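A hedged illustration, not part of the commit: after the rename above, a driver whose framebuffer lives in I/O memory would spell its default ops with the new FB_DEFAULT_IOMEM_OPS macro instead of the old FB_DEFAULT_IO_OPS. The driver name below is a made-up placeholder; only the macro and the helpers it expands to come from the header.

#include <linux/fb.h>
#include <linux/module.h>

static const struct fb_ops example_iomem_fb_ops = {
	.owner	= THIS_MODULE,
	FB_DEFAULT_IOMEM_OPS,	/* fb_io_read/write, cfb_* drawing, default mmap */
	/* device-specific callbacks such as .fb_set_par would follow here */
};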
diff --git a/include/linux/firmware/imx/dsp.h b/include/linux/firmware/imx/dsp.h
index 4f7895a3b73c..1f176a2683fe 100644
--- a/include/linux/firmware/imx/dsp.h
+++ b/include/linux/firmware/imx/dsp.h
@@ -37,17 +37,11 @@ struct imx_dsp_ipc {
static inline void imx_dsp_set_data(struct imx_dsp_ipc *ipc, void *data)
{
- if (!ipc)
- return;
-
ipc->private_data = data;
}
static inline void *imx_dsp_get_data(struct imx_dsp_ipc *ipc)
{
- if (!ipc)
- return NULL;
-
return ipc->private_data;
}
diff --git a/include/linux/firmware/imx/sci.h b/include/linux/firmware/imx/sci.h
index 5cc63fe7e84d..df17196df5ff 100644
--- a/include/linux/firmware/imx/sci.h
+++ b/include/linux/firmware/imx/sci.h
@@ -21,31 +21,37 @@ int imx_scu_enable_general_irq_channel(struct device *dev);
int imx_scu_irq_register_notifier(struct notifier_block *nb);
int imx_scu_irq_unregister_notifier(struct notifier_block *nb);
int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable);
+int imx_scu_irq_get_status(u8 group, u32 *irq_status);
int imx_scu_soc_init(struct device *dev);
#else
static inline int imx_scu_soc_init(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_enable_general_irq_channel(struct device *dev)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_register_notifier(struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_unregister_notifier(struct notifier_block *nb)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
}
static inline int imx_scu_irq_group_enable(u8 group, u32 mask, u8 enable)
{
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
+}
+
+static inline int imx_scu_irq_get_status(u8 group, u32 *irq_status)
+{
+ return -EOPNOTSUPP;
}
#endif
#endif /* _SC_SCI_H */
diff --git a/include/linux/firmware/intel/stratix10-smc.h b/include/linux/firmware/intel/stratix10-smc.h
index a718f853d457..ee80ca4bb0d0 100644
--- a/include/linux/firmware/intel/stratix10-smc.h
+++ b/include/linux/firmware/intel/stratix10-smc.h
@@ -467,6 +467,31 @@ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FPGA_CONFIG_COMPLETED_WRITE)
INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_FIRMWARE_VERSION)
/**
+ * SMC call protocol for Mailbox, starting FUNCID from 60
+ *
+ * Call register usage:
+ * a0 INTEL_SIP_SMC_MBOX_SEND_CMD
+ * a1 mailbox command code
+ * a2 physical address that contains the mailbox command data (header not included)
+ * a3 mailbox command data size in word
+ * a4 set to 0 for CASUAL, set to 1 for URGENT
+ * a5 physical address where the secure firmware puts the response data
+ * (header not included)
+ * a6 maximum size in word of physical address to store response data
+ * a7 not used
+ *
+ * Return status
+ * a0 INTEL_SIP_SMC_STATUS_OK, INTEL_SIP_SMC_STATUS_REJECTED or
+ * INTEL_SIP_SMC_STATUS_ERROR
+ * a1 mailbox error code
+ * a2 response data length in word
+ * a3 not used
+ */
+#define INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD 60
+ #define INTEL_SIP_SMC_MBOX_SEND_CMD \
+ INTEL_SIP_SMC_FAST_CALL_VAL(INTEL_SIP_SMC_FUNCID_MBOX_SEND_CMD)
+
+/**
* Request INTEL_SIP_SMC_SVC_VERSION
*
* Sync call used to query the SIP SMC API Version
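A hedged sketch, not taken from the patch: it only illustrates the register layout documented for INTEL_SIP_SMC_MBOX_SEND_CMD above, using the generic SMCCC helper. The function name and the cmd/data_pa/len_in_words/resp_pa/resp_words parameters are made-up placeholders.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/firmware/intel/stratix10-smc.h>

static int example_mbox_send(u32 cmd, phys_addr_t data_pa, u32 len_in_words,
			     phys_addr_t resp_pa, u32 resp_words)
{
	struct arm_smccc_res res;

	arm_smccc_smc(INTEL_SIP_SMC_MBOX_SEND_CMD,
		      cmd,		/* a1: mailbox command code */
		      data_pa,		/* a2: command data physical address */
		      len_in_words,	/* a3: command data size in words */
		      0,		/* a4: 0 = CASUAL, 1 = URGENT */
		      resp_pa,		/* a5: response buffer physical address */
		      resp_words,	/* a6: response buffer size in words */
		      0, &res);

	return res.a0 == INTEL_SIP_SMC_STATUS_OK ? 0 : -EIO;
}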
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index 0c16037fd08d..60ed82112680 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -118,6 +118,9 @@ struct stratix10_svc_chan;
* @COMMAND_SMC_SVC_VERSION: Non-mailbox SMC SVC API Version,
* return status is SVC_STATUS_OK
*
+ * @COMMAND_MBOX_SEND_CMD: send generic mailbox command, return status is
+ * SVC_STATUS_OK or SVC_STATUS_ERROR
+ *
* @COMMAND_RSU_DCMF_STATUS: query firmware for the DCMF status
* return status is SVC_STATUS_OK or SVC_STATUS_ERROR
*
@@ -164,6 +167,8 @@ enum stratix10_svc_command_code {
COMMAND_FCS_RANDOM_NUMBER_GEN,
/* for general status poll */
COMMAND_POLL_SERVICE_STATUS = 40,
+ /* for generic mailbox send command */
+ COMMAND_MBOX_SEND_CMD = 100,
/* Non-mailbox SMC Call */
COMMAND_SMC_SVC_VERSION = 200,
};
diff --git a/include/linux/firmware/mediatek/mtk-adsp-ipc.h b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
index 28fd313340b8..5b1d16fa3f56 100644
--- a/include/linux/firmware/mediatek/mtk-adsp-ipc.h
+++ b/include/linux/firmware/mediatek/mtk-adsp-ipc.h
@@ -46,17 +46,11 @@ struct mtk_adsp_ipc {
static inline void mtk_adsp_ipc_set_data(struct mtk_adsp_ipc *ipc, void *data)
{
- if (!ipc)
- return;
-
ipc->private_data = data;
}
static inline void *mtk_adsp_ipc_get_data(struct mtk_adsp_ipc *ipc)
{
- if (!ipc)
- return NULL;
-
return ipc->private_data;
}
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index 250ea4efb7cb..0c091a3f6d49 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -75,7 +75,7 @@ struct qcom_scm_pas_metadata {
extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
size_t size,
struct qcom_scm_pas_metadata *ctx);
-void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
+extern void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
phys_addr_t size);
extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 9dda7d9898ff..e8b12ec8b060 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -34,6 +34,19 @@
/* PM API versions */
#define PM_API_VERSION_2 2
+#define PM_PINCTRL_PARAM_SET_VERSION 2
+
+#define ZYNQMP_FAMILY_CODE 0x23
+#define VERSAL_FAMILY_CODE 0x26
+
+/* Used when all subfamilies of the platform need to be supported */
+#define ALL_SUB_FAMILY_CODE 0x00
+#define VERSAL_SUB_FAMILY_CODE 0x01
+#define VERSALNET_SUB_FAMILY_CODE 0x03
+
+#define FAMILY_CODE_MASK GENMASK(27, 21)
+#define SUB_FAMILY_CODE_MASK GENMASK(20, 19)
+
/* ATF only commands */
#define TF_A_PM_REGISTER_SGI 0xa04
#define PM_GET_TRUSTZONE_VERSION 0xa03
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
deleted file mode 100644
index eaa0ac5f9003..000000000000
--- a/include/linux/frontswap.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_FRONTSWAP_H
-#define _LINUX_FRONTSWAP_H
-
-#include <linux/swap.h>
-#include <linux/mm.h>
-#include <linux/bitops.h>
-#include <linux/jump_label.h>
-
-struct frontswap_ops {
- void (*init)(unsigned); /* this swap type was just swapon'ed */
- int (*store)(unsigned, pgoff_t, struct page *); /* store a page */
- int (*load)(unsigned, pgoff_t, struct page *, bool *); /* load a page */
- void (*invalidate_page)(unsigned, pgoff_t); /* page no longer needed */
- void (*invalidate_area)(unsigned); /* swap type just swapoff'ed */
-};
-
-int frontswap_register_ops(const struct frontswap_ops *ops);
-
-extern void frontswap_init(unsigned type, unsigned long *map);
-extern int __frontswap_store(struct page *page);
-extern int __frontswap_load(struct page *page);
-extern void __frontswap_invalidate_page(unsigned, pgoff_t);
-extern void __frontswap_invalidate_area(unsigned);
-
-#ifdef CONFIG_FRONTSWAP
-extern struct static_key_false frontswap_enabled_key;
-
-static inline bool frontswap_enabled(void)
-{
- return static_branch_unlikely(&frontswap_enabled_key);
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
- p->frontswap_map = map;
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return p->frontswap_map;
-}
-#else
-/* all inline routines become no-ops and all externs are ignored */
-
-static inline bool frontswap_enabled(void)
-{
- return false;
-}
-
-static inline void frontswap_map_set(struct swap_info_struct *p,
- unsigned long *map)
-{
-}
-
-static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
-{
- return NULL;
-}
-#endif
-
-static inline int frontswap_store(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_store(page);
-
- return -1;
-}
-
-static inline int frontswap_load(struct page *page)
-{
- if (frontswap_enabled())
- return __frontswap_load(page);
-
- return -1;
-}
-
-static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_page(type, offset);
-}
-
-static inline void frontswap_invalidate_area(unsigned type)
-{
- if (frontswap_enabled())
- __frontswap_invalidate_area(type);
-}
-
-#endif /* _LINUX_FRONTSWAP_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index dda08d973639..b528f063e8ff 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -478,11 +478,11 @@ struct address_space {
atomic_t nr_thps;
#endif
struct rb_root_cached i_mmap;
- struct rw_semaphore i_mmap_rwsem;
unsigned long nrpages;
pgoff_t writeback_index;
const struct address_space_operations *a_ops;
unsigned long flags;
+ struct rw_semaphore i_mmap_rwsem;
errseq_t wb_err;
spinlock_t private_lock;
struct list_head private_list;
@@ -1508,47 +1508,18 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
kgid_has_mapping(fs_userns, kgid);
}
-struct timespec64 current_mgtime(struct inode *inode);
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
-/*
- * Multigrain timestamps
- *
- * Conditionally use fine-grained ctime and mtime timestamps when there
- * are users actively observing them via getattr. The primary use-case
- * for this is NFS clients that use the ctime to distinguish between
- * different states of the file, and that are often fooled by multiple
- * operations that occur in the same coarse-grained timer tick.
- *
- * The kernel always keeps normalized struct timespec64 values in the ctime,
- * which means that only the first 30 bits of the value are used. Use the
- * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value
- * has been queried since it was last updated.
- */
-#define I_CTIME_QUERIED (1L<<30)
-
/**
* inode_get_ctime - fetch the current ctime from the inode
* @inode: inode from which to fetch ctime
*
- * Grab the current ctime tv_nsec field from the inode, mask off the
- * I_CTIME_QUERIED flag and return it. This is mostly intended for use by
- * internal consumers of the ctime that aren't concerned with ensuring a
- * fine-grained update on the next change (e.g. when preparing to store
- * the value in the backing store for later retrieval).
- *
- * This is safe to call regardless of whether the underlying filesystem
- * is using multigrain timestamps.
+ * Grab the current ctime from the inode and return it.
*/
static inline struct timespec64 inode_get_ctime(const struct inode *inode)
{
- struct timespec64 ctime;
-
- ctime.tv_sec = inode->__i_ctime.tv_sec;
- ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED;
-
- return ctime;
+ return inode->__i_ctime;
}
/**
@@ -2334,7 +2305,6 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2358,17 +2328,6 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-/**
- * is_mgtime: is this inode using multigrain timestamps
- * @inode: inode to test for multigrain timestamps
- *
- * Return true if the inode uses multigrain timestamps, false otherwise.
- */
-static inline bool is_mgtime(const struct inode *inode)
-{
- return inode->i_sb->s_type->fs_flags & FS_MGTIME;
-}
-
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2397,6 +2356,7 @@ struct super_block *sget(struct file_system_type *type,
int (*test)(struct super_block *,void *),
int (*set)(struct super_block *,void *),
int flags, void *data);
+struct super_block *sget_dev(struct fs_context *fc, dev_t dev);
/* Alas, no aliases. Too much hassle with bringing module.h everywhere */
#define fops_get(fops) \
@@ -3053,7 +3013,6 @@ extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
diff --git a/include/linux/fs_uart_pd.h b/include/linux/fs_uart_pd.h
deleted file mode 100644
index 36b61ff39277..000000000000
--- a/include/linux/fs_uart_pd.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Platform information definitions for the CPM Uart driver.
- *
- * 2006 (c) MontaVista Software, Inc.
- * Vitaly Bordug <vbordug@ru.mvista.com>
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#ifndef FS_UART_PD_H
-#define FS_UART_PD_H
-
-#include <asm/types.h>
-
-enum fs_uart_id {
- fsid_smc1_uart,
- fsid_smc2_uart,
- fsid_scc1_uart,
- fsid_scc2_uart,
- fsid_scc3_uart,
- fsid_scc4_uart,
- fs_uart_nr,
-};
-
-static inline int fs_uart_id_scc2fsid(int id)
-{
- return fsid_scc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2scc(int id)
-{
- return id - fsid_scc1_uart + 1;
-}
-
-static inline int fs_uart_id_smc2fsid(int id)
-{
- return fsid_smc1_uart + id - 1;
-}
-
-static inline int fs_uart_id_fsid2smc(int id)
-{
- return id - fsid_smc1_uart + 1;
-}
-
-struct fs_uart_platform_info {
- void(*init_ioports)(struct fs_uart_platform_info *);
- /* device specific information */
- int fs_no; /* controller index */
- char fs_type[4]; /* controller type */
- u32 uart_clk;
- u8 tx_num_fifo;
- u8 tx_buf_size;
- u8 rx_num_fifo;
- u8 rx_buf_size;
- u8 brg;
- u8 clk_rx;
- u8 clk_tx;
-};
-
-static inline int fs_uart_get_id(struct fs_uart_platform_info *fpi)
-{
- if(strstr(fpi->fs_type, "SMC"))
- return fs_uart_id_smc2fsid(fpi->fs_no);
- if(strstr(fpi->fs_type, "SCC"))
- return fs_uart_id_scc2fsid(fpi->fs_no);
- return fpi->fs_no;
-}
-
-#endif
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index d7d96c806bff..c0892d75ce33 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -760,9 +760,6 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark,
/* Find mark belonging to given group in the list of marks */
extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
struct fsnotify_group *group);
-/* Get cached fsid of filesystem containing object */
-extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn,
- __kernel_fsid_t *fsid);
/* attach the mark to the object */
extern int fsnotify_add_mark(struct fsnotify_mark *mark,
fsnotify_connp_t *connp, unsigned int obj_type,
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index aad9cf8876b5..e8921871ef9a 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -862,13 +862,8 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
-
-extern void ftrace_disable_daemon(void);
-extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
-static inline void ftrace_disable_daemon(void) { }
-static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
diff --git a/include/linux/gameport.h b/include/linux/gameport.h
index 0a221e768ea4..07e370113b2b 100644
--- a/include/linux/gameport.h
+++ b/include/linux/gameport.h
@@ -63,7 +63,7 @@ struct gameport_driver {
int gameport_open(struct gameport *gameport, struct gameport_driver *drv, int mode);
void gameport_close(struct gameport *gameport);
-#if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE))
+#if IS_REACHABLE(CONFIG_GAMEPORT)
void __gameport_register_port(struct gameport *gameport, struct module *owner);
/* use a define to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
index 2984b0cb24b1..d4da060b7532 100644
--- a/include/linux/genl_magic_func.h
+++ b/include/linux/genl_magic_func.h
@@ -2,6 +2,7 @@
#ifndef GENL_MAGIC_FUNC_H
#define GENL_MAGIC_FUNC_H
+#include <linux/args.h>
#include <linux/build_bug.h>
#include <linux/genl_magic_struct.h>
@@ -23,7 +24,7 @@
#define GENL_struct(tag_name, tag_number, s_name, s_fields) \
[tag_name] = { .type = NLA_NESTED },
-static struct nla_policy CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
+static struct nla_policy CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy)[] = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -209,7 +210,7 @@ static int s_name ## _from_attrs_for_change(struct s_name *s, \
* Magic: define op number to op name mapping {{{1
* {{{2
*/
-static const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
+static const char *CONCATENATE(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
{
switch (cmd) {
#undef GENL_op
@@ -235,7 +236,7 @@ static const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
.cmd = op_name, \
},
-#define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
+#define ZZZ_genl_ops CONCATENATE(GENL_MAGIC_FAMILY, _genl_ops)
static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
#include GENL_MAGIC_INCLUDE_FILE
};
@@ -248,32 +249,32 @@ static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
* and provide register/unregister functions.
* {{{2
*/
-#define ZZZ_genl_family CONCAT_(GENL_MAGIC_FAMILY, _genl_family)
+#define ZZZ_genl_family CONCATENATE(GENL_MAGIC_FAMILY, _genl_family)
static struct genl_family ZZZ_genl_family;
/*
* Magic: define multicast groups
* Magic: define multicast group registration helper
*/
-#define ZZZ_genl_mcgrps CONCAT_(GENL_MAGIC_FAMILY, _genl_mcgrps)
+#define ZZZ_genl_mcgrps CONCATENATE(GENL_MAGIC_FAMILY, _genl_mcgrps)
static const struct genl_multicast_group ZZZ_genl_mcgrps[] = {
#undef GENL_mc_group
#define GENL_mc_group(group) { .name = #group, },
#include GENL_MAGIC_INCLUDE_FILE
};
-enum CONCAT_(GENL_MAGIC_FAMILY, group_ids) {
+enum CONCATENATE(GENL_MAGIC_FAMILY, group_ids) {
#undef GENL_mc_group
-#define GENL_mc_group(group) CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group),
+#define GENL_mc_group(group) CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group),
#include GENL_MAGIC_INCLUDE_FILE
};
#undef GENL_mc_group
#define GENL_mc_group(group) \
-static int CONCAT_(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
+static int CONCATENATE(GENL_MAGIC_FAMILY, _genl_multicast_ ## group)( \
struct sk_buff *skb, gfp_t flags) \
{ \
unsigned int group_id = \
- CONCAT_(GENL_MAGIC_FAMILY, _group_ ## group); \
+ CONCATENATE(GENL_MAGIC_FAMILY, _group_ ## group); \
return genlmsg_multicast(&ZZZ_genl_family, skb, 0, \
group_id, flags); \
}
@@ -289,8 +290,8 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
#ifdef GENL_MAGIC_FAMILY_HDRSZ
.hdrsize = NLA_ALIGN(GENL_MAGIC_FAMILY_HDRSZ),
#endif
- .maxattr = ARRAY_SIZE(CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
- .policy = CONCAT_(GENL_MAGIC_FAMILY, _tla_nl_policy),
+ .maxattr = ARRAY_SIZE(CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy))-1,
+ .policy = CONCATENATE(GENL_MAGIC_FAMILY, _tla_nl_policy),
.ops = ZZZ_genl_ops,
.n_ops = ARRAY_SIZE(ZZZ_genl_ops),
.mcgrps = ZZZ_genl_mcgrps,
@@ -299,12 +300,12 @@ static struct genl_family ZZZ_genl_family __ro_after_init = {
.module = THIS_MODULE,
};
-int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void)
+int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void)
{
return genl_register_family(&ZZZ_genl_family);
}
-void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void)
+void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void)
{
genl_unregister_family(&ZZZ_genl_family);
}
diff --git a/include/linux/genl_magic_struct.h b/include/linux/genl_magic_struct.h
index f81d48987528..a419d93789ff 100644
--- a/include/linux/genl_magic_struct.h
+++ b/include/linux/genl_magic_struct.h
@@ -14,14 +14,12 @@
# error "you need to define GENL_MAGIC_INCLUDE_FILE before inclusion"
#endif
+#include <linux/args.h>
#include <linux/genetlink.h>
#include <linux/types.h>
-#define CONCAT__(a,b) a ## b
-#define CONCAT_(a,b) CONCAT__(a,b)
-
-extern int CONCAT_(GENL_MAGIC_FAMILY, _genl_register)(void);
-extern void CONCAT_(GENL_MAGIC_FAMILY, _genl_unregister)(void);
+extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
+extern void CONCATENATE(GENL_MAGIC_FAMILY, _genl_unregister)(void);
/*
* Extension of genl attribute validation policies {{{2
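A hedged illustration of the conversion above, not part of the patch: CONCATENATE() from <linux/args.h> pastes two tokens after expanding them, exactly like the local CONCAT_() helper it replaces. Assuming GENL_MAGIC_FAMILY is defined as drbd (the in-tree user of these magic headers), the declaration expands as shown in the comment.

#include <linux/args.h>

#define GENL_MAGIC_FAMILY drbd
extern int CONCATENATE(GENL_MAGIC_FAMILY, _genl_register)(void);
/* expands to: extern int drbd_genl_register(void); */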
diff --git a/include/linux/greybus/svc.h b/include/linux/greybus/svc.h
index 5afaf5f06856..da547fb9071b 100644
--- a/include/linux/greybus/svc.h
+++ b/include/linux/greybus/svc.h
@@ -100,7 +100,4 @@ bool gb_svc_watchdog_enabled(struct gb_svc *svc);
int gb_svc_watchdog_enable(struct gb_svc *svc);
int gb_svc_watchdog_disable(struct gb_svc *svc);
-int gb_svc_protocol_init(void);
-void gb_svc_protocol_exit(void);
-
#endif /* __SVC_H */
diff --git a/include/linux/hid-roccat.h b/include/linux/hid-roccat.h
index 3214fb0815fc..753654fff07f 100644
--- a/include/linux/hid-roccat.h
+++ b/include/linux/hid-roccat.h
@@ -16,7 +16,7 @@
#ifdef __KERNEL__
-int roccat_connect(struct class *klass, struct hid_device *hid,
+int roccat_connect(const struct class *klass, struct hid_device *hid,
int report_size);
void roccat_disconnect(int minor);
int roccat_report_event(int minor, u8 const *data);
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 39e21e3815ad..964ca1f15e3f 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -341,6 +341,29 @@ struct hid_item {
*/
#define MAX_USBHID_BOOT_QUIRKS 4
+/**
+ * DOC: HID quirks
+ * | @HID_QUIRK_NOTOUCH:
+ * | @HID_QUIRK_IGNORE: ignore this device
+ * | @HID_QUIRK_NOGET:
+ * | @HID_QUIRK_HIDDEV_FORCE:
+ * | @HID_QUIRK_BADPAD:
+ * | @HID_QUIRK_MULTI_INPUT:
+ * | @HID_QUIRK_HIDINPUT_FORCE:
+ * | @HID_QUIRK_ALWAYS_POLL:
+ * | @HID_QUIRK_INPUT_PER_APP:
+ * | @HID_QUIRK_X_INVERT:
+ * | @HID_QUIRK_Y_INVERT:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORTS:
+ * | @HID_QUIRK_SKIP_OUTPUT_REPORT_ID:
+ * | @HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP:
+ * | @HID_QUIRK_HAVE_SPECIAL_DRIVER:
+ * | @HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE:
+ * | @HID_QUIRK_FULLSPEED_INTERVAL:
+ * | @HID_QUIRK_NO_INIT_REPORTS:
+ * | @HID_QUIRK_NO_IGNORE:
+ * | @HID_QUIRK_NO_INPUT_SYNC:
+ */
/* BIT(0) reserved for backward compatibility, was HID_QUIRK_INVERT */
#define HID_QUIRK_NOTOUCH BIT(1)
#define HID_QUIRK_IGNORE BIT(2)
@@ -360,6 +383,7 @@ struct hid_item {
#define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP BIT(18)
#define HID_QUIRK_HAVE_SPECIAL_DRIVER BIT(19)
#define HID_QUIRK_INCREMENT_USAGE_ON_DUPLICATE BIT(20)
+#define HID_QUIRK_NOINVERT BIT(21)
#define HID_QUIRK_FULLSPEED_INTERVAL BIT(28)
#define HID_QUIRK_NO_INIT_REPORTS BIT(29)
#define HID_QUIRK_NO_IGNORE BIT(30)
@@ -555,9 +579,9 @@ struct hid_input {
struct hid_report *report;
struct input_dev *input;
const char *name;
- bool registered;
struct list_head reports; /* the list of reports */
unsigned int application; /* application usage for this input */
+ bool registered;
};
enum hid_type {
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 68da30625a6c..99c474de800d 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -439,6 +439,50 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
kunmap_local(addr);
}
+static inline void memcpy_from_folio(char *to, struct folio *folio,
+ size_t offset, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ const char *from = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(from);
+
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+}
+
+static inline void memcpy_to_folio(struct folio *folio, size_t offset,
+ const char *from, size_t len)
+{
+ VM_BUG_ON(offset + len > folio_size(folio));
+
+ do {
+ char *to = kmap_local_folio(folio, offset);
+ size_t chunk = len;
+
+ if (folio_test_highmem(folio) &&
+ chunk > PAGE_SIZE - offset_in_page(offset))
+ chunk = PAGE_SIZE - offset_in_page(offset);
+ memcpy(to, from, chunk);
+ kunmap_local(to);
+
+ from += chunk;
+ offset += chunk;
+ len -= chunk;
+ } while (len > 0);
+
+ flush_dcache_folio(folio);
+}
+
/**
* memcpy_from_file_folio - Copy some bytes from a file folio.
* @to: The destination buffer.
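A hedged usage sketch, not from the patch: copying a small header out of a folio with the new memcpy_from_folio() helper. The struct and function names are made-up; the point is that the helper chunks the copy per page itself, so a caller does not need its own kmap_local_folio() loop for highmem folios that span page boundaries.

#include <linux/highmem.h>
#include <linux/types.h>

struct example_hdr {
	__le32 magic;
	__le32 len;
};

static void example_read_hdr(struct folio *folio, struct example_hdr *hdr)
{
	/* offset 0 is a placeholder; any offset within the folio works */
	memcpy_from_folio((char *)hdr, folio, 0, sizeof(*hdr));
}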
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e718dbe928ba..fa0350b0812a 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -140,9 +140,7 @@ bool hugepage_vma_check(struct vm_area_struct *vma, unsigned long vm_flags,
unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
-void prep_transhuge_page(struct page *page);
-void free_transhuge_page(struct page *page);
-
+void folio_prep_large_rmappable(struct folio *folio);
bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
@@ -282,7 +280,7 @@ static inline bool hugepage_vma_check(struct vm_area_struct *vma,
return false;
}
-static inline void prep_transhuge_page(struct page *page) {}
+static inline void folio_prep_large_rmappable(struct folio *folio) {}
#define transparent_hugepage_flags 0UL
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index ca3c8e10f24a..5b2626063f4f 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -26,6 +26,8 @@ typedef struct { unsigned long pd; } hugepd_t;
#define __hugepd(x) ((hugepd_t) { (x) })
#endif
+void free_huge_folio(struct folio *folio);
+
#ifdef CONFIG_HUGETLB_PAGE
#include <linux/mempolicy.h>
@@ -131,10 +133,8 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *,
struct vm_area_struct *, struct vm_area_struct *);
struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags);
-long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
- struct page **, unsigned long *, unsigned long *,
- long, unsigned int, int *);
+ unsigned long address, unsigned int flags,
+ unsigned int *page_mask);
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
@@ -167,7 +167,6 @@ int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
bool *migratable_cleared);
void folio_putback_active_hugetlb(struct folio *folio);
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason);
-void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
@@ -297,21 +296,13 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
-static inline struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
- unsigned long address, unsigned int flags)
+static inline struct page *hugetlb_follow_page_mask(
+ struct vm_area_struct *vma, unsigned long address, unsigned int flags,
+ unsigned int *page_mask)
{
BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE*/
}
-static inline long follow_hugetlb_page(struct mm_struct *mm,
- struct vm_area_struct *vma, struct page **pages,
- unsigned long *position, unsigned long *nr_pages,
- long i, unsigned int flags, int *nonblocking)
-{
- BUG();
- return 0;
-}
-
static inline int copy_hugetlb_page_range(struct mm_struct *dst,
struct mm_struct *src,
struct vm_area_struct *dst_vma,
@@ -851,11 +842,6 @@ static inline struct hstate *folio_hstate(struct folio *folio)
return size_to_hstate(folio_size(folio));
}
-static inline struct hstate *page_hstate(struct page *page)
-{
- return folio_hstate(page_folio(page));
-}
-
static inline unsigned hstate_index_to_shift(unsigned index)
{
return hstates[index].order + PAGE_SHIFT;
@@ -1007,6 +993,11 @@ void hugetlb_register_node(struct node *node);
void hugetlb_unregister_node(struct node *node);
#endif
+/*
+ * Check if a given raw @page in a hugepage is HWPOISON.
+ */
+bool is_raw_hwpoison_page_in_hugepage(struct page *page);
+
#else /* CONFIG_HUGETLB_PAGE */
struct hstate {};
@@ -1067,11 +1058,6 @@ static inline struct hstate *folio_hstate(struct folio *folio)
return NULL;
}
-static inline struct hstate *page_hstate(struct page *page)
-{
- return NULL;
-}
-
static inline struct hstate *size_to_hstate(unsigned long size)
{
return NULL;
diff --git a/include/linux/hw_breakpoint.h b/include/linux/hw_breakpoint.h
index 7fbb45911273..db199d653dd1 100644
--- a/include/linux/hw_breakpoint.h
+++ b/include/linux/hw_breakpoint.h
@@ -90,9 +90,6 @@ extern int dbg_reserve_bp_slot(struct perf_event *bp);
extern int dbg_release_bp_slot(struct perf_event *bp);
extern int reserve_bp_slot(struct perf_event *bp);
extern void release_bp_slot(struct perf_event *bp);
-int arch_reserve_bp_slot(struct perf_event *bp);
-void arch_release_bp_slot(struct perf_event *bp);
-void arch_unregister_hw_breakpoint(struct perf_event *bp);
extern void flush_ptrace_hw_breakpoint(struct task_struct *tsk);
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 3ac3974b3c78..2b00faf98017 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -348,7 +348,7 @@ struct vmtransfer_page_packet_header {
u8 sender_owns_set;
u8 reserved;
u32 range_cnt;
- struct vmtransfer_page_range ranges[1];
+ struct vmtransfer_page_range ranges[];
} __packed;
struct vmgpadl_packet_header {
@@ -665,8 +665,8 @@ struct vmbus_channel_initiate_contact {
u64 interrupt_page;
struct {
u8 msg_sint;
- u8 padding1[3];
- u32 padding2;
+ u8 msg_vtl;
+ u8 reserved[6];
};
};
u64 monitor_page1;
diff --git a/include/linux/i2c-atr.h b/include/linux/i2c-atr.h
new file mode 100644
index 000000000000..4d5da161c225
--- /dev/null
+++ b/include/linux/i2c-atr.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * I2C Address Translator
+ *
+ * Copyright (c) 2019,2022 Luca Ceresoli <luca@lucaceresoli.net>
+ * Copyright (c) 2022,2023 Tomi Valkeinen <tomi.valkeinen@ideasonboard.com>
+ *
+ * Based on i2c-mux.h
+ */
+
+#ifndef _LINUX_I2C_ATR_H
+#define _LINUX_I2C_ATR_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+struct device;
+struct fwnode_handle;
+struct i2c_atr;
+
+/**
+ * struct i2c_atr_ops - Callbacks from ATR to the device driver.
+ * @attach_client: Notify the driver of a new device connected on a child
+ * bus, with the alias assigned to it. The driver must
+ * configure the hardware to use the alias.
+ * @detach_client: Notify the driver of a device getting disconnected. The
+ * driver must configure the hardware to stop using the
+ * alias.
+ *
+ * All these functions return 0 on success, a negative error code otherwise.
+ */
+struct i2c_atr_ops {
+ int (*attach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client, u16 alias);
+ void (*detach_client)(struct i2c_atr *atr, u32 chan_id,
+ const struct i2c_client *client);
+};
+
+/**
+ * i2c_atr_new() - Allocate and initialize an I2C ATR helper.
+ * @parent: The parent (upstream) adapter
+ * @dev: The device acting as an ATR
+ * @ops: Driver-specific callbacks
+ * @max_adapters: Maximum number of child adapters
+ *
+ * The new ATR helper is connected to the parent adapter but has no child
+ * adapters. Call i2c_atr_add_adapter() to add some.
+ *
+ * Call i2c_atr_delete() to remove.
+ *
+ * Return: pointer to the new ATR helper object, or ERR_PTR
+ */
+struct i2c_atr *i2c_atr_new(struct i2c_adapter *parent, struct device *dev,
+ const struct i2c_atr_ops *ops, int max_adapters);
+
+/**
+ * i2c_atr_delete - Delete an I2C ATR helper.
+ * @atr: I2C ATR helper to be deleted.
+ *
+ * Precondition: all the adapters added with i2c_atr_add_adapter() must be
+ * removed by calling i2c_atr_del_adapter().
+ */
+void i2c_atr_delete(struct i2c_atr *atr);
+
+/**
+ * i2c_atr_add_adapter - Create a child ("downstream") I2C bus.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the new adapter (0 .. max_adapters-1). This value is
+ * passed to the callbacks in `struct i2c_atr_ops`.
+ * @adapter_parent: The device used as the parent of the new i2c adapter, or NULL
+ * to use the i2c-atr device as the parent.
+ * @bus_handle: The fwnode handle that points to the adapter's i2c
+ * peripherals, or NULL.
+ *
+ * After calling this function a new i2c bus will appear. Adding and removing
+ * devices on the downstream bus will result in calls to the
+ * &i2c_atr_ops->attach_client and &i2c_atr_ops->detach_client callbacks for the
+ * driver to assign an alias to the device.
+ *
+ * The adapter's fwnode is set to @bus_handle, or if @bus_handle is NULL the
+ * function looks for a child node whose 'reg' property matches the chan_id
+ * under the i2c-atr device's 'i2c-atr' node.
+ *
+ * Call i2c_atr_del_adapter() to remove the adapter.
+ *
+ * Return: 0 on success, a negative error code otherwise.
+ */
+int i2c_atr_add_adapter(struct i2c_atr *atr, u32 chan_id,
+ struct device *adapter_parent,
+ struct fwnode_handle *bus_handle);
+
+/**
+ * i2c_atr_del_adapter - Remove a child ("downstream") I2C bus added by
+ * i2c_atr_add_adapter(). If no I2C bus has been added
+ * this function is a no-op.
+ * @atr: The I2C ATR
+ * @chan_id: Index of the adapter to be removed (0 .. max_adapters-1)
+ */
+void i2c_atr_del_adapter(struct i2c_atr *atr, u32 chan_id);
+
+/**
+ * i2c_atr_set_driver_data - Set private driver data to the i2c-atr instance.
+ * @atr: The I2C ATR
+ * @data: Pointer to the data to store
+ */
+void i2c_atr_set_driver_data(struct i2c_atr *atr, void *data);
+
+/**
+ * i2c_atr_get_driver_data - Get the stored driver data.
+ * @atr: The I2C ATR
+ *
+ * Return: Pointer to the stored data
+ */
+void *i2c_atr_get_driver_data(struct i2c_atr *atr);
+
+#endif /* _LINUX_I2C_ATR_H */
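A hedged sketch of how a driver might consume the new ATR helper; it is not taken from the patch. The callback bodies, EXAMPLE_NUM_PORTS and the probe shape are placeholders, only the i2c_atr_* calls and the i2c_atr_ops layout come from the header above.

#include <linux/err.h>
#include <linux/i2c-atr.h>

#define EXAMPLE_NUM_PORTS 2	/* placeholder: number of downstream ports */

static int example_attach_client(struct i2c_atr *atr, u32 chan_id,
				 const struct i2c_client *client, u16 alias)
{
	/* program the hardware so that "alias" reaches "client" on chan_id */
	return 0;
}

static void example_detach_client(struct i2c_atr *atr, u32 chan_id,
				  const struct i2c_client *client)
{
	/* release the alias programmed in example_attach_client() */
}

static const struct i2c_atr_ops example_atr_ops = {
	.attach_client = example_attach_client,
	.detach_client = example_detach_client,
};

static int example_probe(struct device *dev, struct i2c_adapter *parent)
{
	struct i2c_atr *atr;
	int ret;

	atr = i2c_atr_new(parent, dev, &example_atr_ops, EXAMPLE_NUM_PORTS);
	if (IS_ERR(atr))
		return PTR_ERR(atr);

	/* one child bus; NULL bus_handle lets the helper look up the fwnode */
	ret = i2c_atr_add_adapter(atr, 0, NULL, NULL);
	if (ret)
		i2c_atr_delete(atr);

	return ret;
}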
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 3430cc2b05a6..0dae9db27538 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -237,7 +237,6 @@ enum i2c_driver_flags {
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding
- * @probe_new: Transitional callback for device binding - do not use
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -272,16 +271,8 @@ enum i2c_driver_flags {
struct i2c_driver {
unsigned int class;
- union {
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client);
- /*
- * Legacy callback that was part of a conversion of .probe().
- * Today it has the same semantic as .probe(). Don't use for new
- * code.
- */
- int (*probe_new)(struct i2c_client *client);
- };
+ int (*probe)(struct i2c_client *client);
void (*remove)(struct i2c_client *client);
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 1b9b15a492fa..cdc684e04a2f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -189,6 +189,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
diff --git a/include/linux/iio/common/inv_sensors_timestamp.h b/include/linux/iio/common/inv_sensors_timestamp.h
new file mode 100644
index 000000000000..a47d304d1ba7
--- /dev/null
+++ b/include/linux/iio/common/inv_sensors_timestamp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Invensense, Inc.
+ */
+
+#ifndef INV_SENSORS_TIMESTAMP_H_
+#define INV_SENSORS_TIMESTAMP_H_
+
+/**
+ * struct inv_sensors_timestamp_chip - chip internal properties
+ * @clock_period: internal clock period in ns
+ * @jitter: acceptable jitter in per-mille
+ * @init_period: chip initial period at reset in ns
+ */
+struct inv_sensors_timestamp_chip {
+ uint32_t clock_period;
+ uint32_t jitter;
+ uint32_t init_period;
+};
+
+/**
+ * struct inv_sensors_timestamp_interval - timestamps interval
+ * @lo: interval lower bound
+ * @up: interval upper bound
+ */
+struct inv_sensors_timestamp_interval {
+ int64_t lo;
+ int64_t up;
+};
+
+/**
+ * struct inv_sensors_timestamp_acc - accumulator for computing an estimation
+ * @val: current estimation of the value, the mean of all values
+ * @idx: current index of the next free place in values table
+ * @values: table of all measured values, used for computing the mean
+ */
+struct inv_sensors_timestamp_acc {
+ uint32_t val;
+ size_t idx;
+ uint32_t values[32];
+};
+
+/**
+ * struct inv_sensors_timestamp - timestamp management states
+ * @chip: chip internal characteristics
+ * @min_period: minimal acceptable clock period
+ * @max_period: maximal acceptable clock period
+ * @it: interrupts interval timestamps
+ * @timestamp: store last timestamp for computing next data timestamp
+ * @mult: current internal period multiplier
+ * @new_mult: new set internal period multiplier (not yet effective)
+ * @period: measured current period of the sensor
+ * @chip_period: accumulator for computing internal chip period
+ */
+struct inv_sensors_timestamp {
+ struct inv_sensors_timestamp_chip chip;
+ uint32_t min_period;
+ uint32_t max_period;
+ struct inv_sensors_timestamp_interval it;
+ int64_t timestamp;
+ uint32_t mult;
+ uint32_t new_mult;
+ uint32_t period;
+ struct inv_sensors_timestamp_acc chip_period;
+};
+
+void inv_sensors_timestamp_init(struct inv_sensors_timestamp *ts,
+ const struct inv_sensors_timestamp_chip *chip);
+
+int inv_sensors_timestamp_update_odr(struct inv_sensors_timestamp *ts,
+ uint32_t period, bool fifo);
+
+void inv_sensors_timestamp_interrupt(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ size_t sensor_nb, int64_t timestamp);
+
+static inline int64_t inv_sensors_timestamp_pop(struct inv_sensors_timestamp *ts)
+{
+ ts->timestamp += ts->period;
+ return ts->timestamp;
+}
+
+void inv_sensors_timestamp_apply_odr(struct inv_sensors_timestamp *ts,
+ uint32_t fifo_period, size_t fifo_nb,
+ unsigned int fifo_no);
+
+static inline void inv_sensors_timestamp_reset(struct inv_sensors_timestamp *ts)
+{
+ const struct inv_sensors_timestamp_interval interval_init = {0LL, 0LL};
+
+ ts->it = interval_init;
+ ts->timestamp = 0;
+}
+
+#endif
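A hedged sketch of the intended call flow, inferred from the header above rather than taken from driver code in this patch. The chip constants and the 100 Hz ODR are made-up placeholders; the periods are assumed to be in nanoseconds as the kernel-doc states.

#include <linux/iio/common/inv_sensors_timestamp.h>

static const struct inv_sensors_timestamp_chip example_chip = {
	.clock_period	= 31250,	/* placeholder: ~32 kHz internal clock, in ns */
	.jitter		= 20,		/* placeholder: 2% expressed in per-mille */
	.init_period	= 1000000,	/* placeholder: 1 kHz period at reset, in ns */
};

static void example_setup(struct inv_sensors_timestamp *ts)
{
	inv_sensors_timestamp_init(ts, &example_chip);
	inv_sensors_timestamp_update_odr(ts, 10000000, true);	/* 100 Hz, FIFO on */
}

static void example_irq_handler(struct inv_sensors_timestamp *ts,
				uint32_t fifo_period, size_t nb_samples,
				int64_t irq_timestamp)
{
	size_t i;

	/* account for this interrupt, then pop one timestamp per FIFO sample */
	inv_sensors_timestamp_interrupt(ts, fifo_period, nb_samples, 1,
					irq_timestamp);
	for (i = 0; i < nb_samples; i++) {
		int64_t sample_ts = inv_sensors_timestamp_pop(ts);
		/* push sample_ts to the IIO buffer together with the data */
	}
}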
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index 6802596b017c..e9910b41d48e 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -201,8 +201,9 @@ struct iio_dev
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_channel_raw(struct iio_channel *chan,
int *val);
@@ -212,8 +213,9 @@ int iio_read_channel_raw(struct iio_channel *chan,
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*
 * In contrast to the normal iio_read_channel_raw this function
* returns the average of multiple reads.
@@ -281,8 +283,9 @@ int iio_read_channel_attribute(struct iio_channel *chan, int *val,
* @chan: The channel being queried.
* @val: Value being written.
*
- * Note raw writes to iio channels are in dac counts and hence
- * scale will need to be applied if standard units required.
+ * Note that for raw writes to iio channels, if the value provided is
+ * in standard units, the effect of the scale and offset must be removed
+ * as (value / scale) - offset.
*/
int iio_write_channel_raw(struct iio_channel *chan, int val);
@@ -292,12 +295,25 @@ int iio_write_channel_raw(struct iio_channel *chan, int val);
* @chan: The channel being queried.
* @val: Value read back.
*
- * Note raw reads from iio channels are in adc counts and hence
- * scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
/**
+ * iio_read_min_channel_raw() - read minimum available raw value from a given
+ * channel, i.e. the minimum possible value.
+ * @chan: The channel being queried.
+ * @val: Value read back.
+ *
+ * Note, if standard units are required, raw reads from iio channels
+ * need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
+ */
+int iio_read_min_channel_raw(struct iio_channel *chan, int *val);
+
+/**
* iio_read_avail_channel_raw() - read available raw values from a given channel
* @chan: The channel being queried.
* @vals: Available values read back.
@@ -308,8 +324,9 @@ int iio_read_max_channel_raw(struct iio_channel *chan, int *val);
* For ranges, three vals are always returned; min, step and max.
* For lists, all the possible values are enumerated.
*
- * Note raw available values from iio channels are in adc counts and
- * hence scale will need to be applied if standard units are required.
+ * Note, if standard units are required, raw available values from iio
+ * channels need the offset (default 0) and scale (default 1) to be applied
+ * as (raw + offset) * scale.
*/
int iio_read_avail_channel_raw(struct iio_channel *chan,
const int **vals, int *length);
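A hedged sketch, not from the patch, of applying the "(raw + offset) * scale" rule that the updated comments describe. For brevity it assumes the channel reports both offset and scale as plain integers (IIO_VAL_INT); a real consumer would handle the fractional IIO_VAL_* encodings or simply call iio_read_channel_processed() and let the core do the conversion. The function name is a placeholder.

#include <linux/iio/consumer.h>

static int example_read_scaled(struct iio_channel *chan, int *result)
{
	int raw, offset = 0, scale = 1, val2;
	int ret;

	ret = iio_read_channel_raw(chan, &raw);
	if (ret < 0)
		return ret;

	/* both calls may return a fractional type; assumed IIO_VAL_INT here */
	iio_read_channel_offset(chan, &offset, &val2);
	iio_read_channel_scale(chan, &scale, &val2);

	*result = (raw + offset) * scale;
	return 0;
}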
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index 82faa98c719a..117bde7d6ad7 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -19,6 +19,8 @@ enum iio_event_info {
IIO_EV_INFO_TIMEOUT,
IIO_EV_INFO_RESET_TIMEOUT,
IIO_EV_INFO_TAP2_MIN_DELAY,
+ IIO_EV_INFO_RUNNING_PERIOD,
+ IIO_EV_INFO_RUNNING_COUNT,
};
#define IIO_VAL_INT 1
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
index cda1f706eaeb..aa0b3ffea935 100644
--- a/include/linux/instruction_pointer.h
+++ b/include/linux/instruction_pointer.h
@@ -2,7 +2,12 @@
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H
+#include <asm/linkage.h>
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
#endif /* _LINUX_INSTRUCTION_POINTER_H */
diff --git a/include/media/dvb_math.h b/include/linux/int_log.h
index 8690ec42954d..0a6f58c38b61 100644
--- a/include/media/dvb_math.h
+++ b/include/linux/int_log.h
@@ -1,22 +1,12 @@
+/* SPDX-License-Identifier: LGPL-2.1-or-later */
/*
- * dvb-math provides some complex fixed-point math
- * operations shared between the dvb related stuff
+ * Provides fixed-point logarithm operations.
*
* Copyright (C) 2006 Christoph Pfister (christophpfister@gmail.com)
- *
- * This library is free software; you can redistribute it and/or modify
- * it under the terms of the GNU Lesser General Public License as
- * published by the Free Software Foundation; either version 2.1 of
- * the License, or (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU Lesser General Public License for more details.
*/
-#ifndef __DVB_MATH_H
-#define __DVB_MATH_H
+#ifndef __LINUX_INT_LOG_H
+#define __LINUX_INT_LOG_H
#include <linux/types.h>
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index f505788c05da..04d937ad4dc4 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -27,4 +27,6 @@ struct intel_tpmi_plat_info *tpmi_get_platform_data(struct auxiliary_device *aux
struct resource *tpmi_get_resource_at_index(struct auxiliary_device *auxdev, int index);
int tpmi_get_resource_count(struct auxiliary_device *auxdev);
+int tpmi_get_feature_status(struct auxiliary_device *auxdev, int feature_id, int *locked,
+ int *disabled);
#endif
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index e6d8aca6886d..7ba183f221f1 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -33,7 +33,7 @@ struct icc_node_data {
*/
struct icc_onecell_data {
unsigned int num_nodes;
- struct icc_node *nodes[];
+ struct icc_node *nodes[] __counted_by(num_nodes);
};
struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec,
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index bb9c666bd584..106cdc55ff3b 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -81,6 +81,7 @@ static inline void io_uring_free(struct task_struct *tsk)
if (tsk->io_uring)
__io_uring_free(tsk);
}
+int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
@@ -116,6 +117,11 @@ static inline const char *io_uring_get_opcode(u8 opcode)
{
return "";
}
+static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+ return -EOPNOTSUPP;
+}
#endif
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index f04ce513fadb..13d19b9be9f4 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -69,8 +69,8 @@ struct io_uring_task {
};
struct io_uring {
- u32 head ____cacheline_aligned_in_smp;
- u32 tail ____cacheline_aligned_in_smp;
+ u32 head;
+ u32 tail;
};
/*
@@ -176,7 +176,6 @@ struct io_submit_state {
unsigned short submit_nr;
unsigned int cqes_count;
struct blk_plug plug;
- struct io_uring_cqe cqes[16];
};
struct io_ev_fd {
@@ -205,25 +204,17 @@ struct io_ring_ctx {
unsigned int has_evfd: 1;
/* all CQEs should be posted only by the submitter task */
unsigned int task_complete: 1;
+ unsigned int lockless_cq: 1;
unsigned int syscall_iopoll: 1;
unsigned int poll_activated: 1;
unsigned int drain_disabled: 1;
unsigned int compat: 1;
- enum task_work_notify_mode notify_method;
+ struct task_struct *submitter_task;
+ struct io_rings *rings;
+ struct percpu_ref refs;
- /*
- * If IORING_SETUP_NO_MMAP is used, then the below holds
- * the gup'ed pages for the two rings, and the sqes.
- */
- unsigned short n_ring_pages;
- unsigned short n_sqe_pages;
- struct page **ring_pages;
- struct page **sqe_pages;
-
- struct io_rings *rings;
- struct task_struct *submitter_task;
- struct percpu_ref refs;
+ enum task_work_notify_mode notify_method;
} ____cacheline_aligned_in_smp;
/* submission data */
@@ -261,31 +252,20 @@ struct io_ring_ctx {
struct io_buffer_list *io_bl;
struct xarray io_bl_xa;
- struct list_head io_buffers_cache;
struct io_hash_table cancel_table_locked;
- struct list_head cq_overflow_list;
struct io_alloc_cache apoll_cache;
struct io_alloc_cache netmsg_cache;
- } ____cacheline_aligned_in_smp;
-
- /* IRQ completion list, under ->completion_lock */
- struct io_wq_work_list locked_free_list;
- unsigned int locked_free_nr;
-
- const struct cred *sq_creds; /* cred used for __io_sq_thread() */
- struct io_sq_data *sq_data; /* if using sq thread polling */
-
- struct wait_queue_head sqo_sq_wait;
- struct list_head sqd_list;
- unsigned long check_cq;
-
- unsigned int file_alloc_start;
- unsigned int file_alloc_end;
-
- struct xarray personalities;
- u32 pers_next;
+ /*
+ * ->iopoll_list is protected by the ctx->uring_lock for
+ * io_uring instances that don't use IORING_SETUP_SQPOLL.
+ * For SQPOLL, only the single threaded io_sq_thread() will
+ * manipulate the list, hence no extra locking is needed there.
+ */
+ struct io_wq_work_list iopoll_list;
+ bool poll_multi_queue;
+ } ____cacheline_aligned_in_smp;
struct {
/*
@@ -298,39 +278,55 @@ struct io_ring_ctx {
unsigned cached_cq_tail;
unsigned cq_entries;
struct io_ev_fd __rcu *io_ev_fd;
- struct wait_queue_head cq_wait;
unsigned cq_extra;
} ____cacheline_aligned_in_smp;
+ /*
+ * task_work and async notification delivery cacheline. Expected to
+ * regularly bounce b/w CPUs.
+ */
struct {
- spinlock_t completion_lock;
-
- bool poll_multi_queue;
- atomic_t cq_wait_nr;
-
- /*
- * ->iopoll_list is protected by the ctx->uring_lock for
- * io_uring instances that don't use IORING_SETUP_SQPOLL.
- * For SQPOLL, only the single threaded io_sq_thread() will
- * manipulate the list, hence no extra locking is needed there.
- */
- struct io_wq_work_list iopoll_list;
- struct io_hash_table cancel_table;
-
struct llist_head work_llist;
-
- struct list_head io_buffers_comp;
+ unsigned long check_cq;
+ atomic_t cq_wait_nr;
+ atomic_t cq_timeouts;
+ struct wait_queue_head cq_wait;
} ____cacheline_aligned_in_smp;
/* timeouts */
struct {
spinlock_t timeout_lock;
- atomic_t cq_timeouts;
struct list_head timeout_list;
struct list_head ltimeout_list;
unsigned cq_last_tm_flush;
} ____cacheline_aligned_in_smp;
+ struct io_uring_cqe completion_cqes[16];
+
+ spinlock_t completion_lock;
+
+ /* IRQ completion list, under ->completion_lock */
+ struct io_wq_work_list locked_free_list;
+ unsigned int locked_free_nr;
+
+ struct list_head io_buffers_comp;
+ struct list_head cq_overflow_list;
+ struct io_hash_table cancel_table;
+
+ const struct cred *sq_creds; /* cred used for __io_sq_thread() */
+ struct io_sq_data *sq_data; /* if using sq thread polling */
+
+ struct wait_queue_head sqo_sq_wait;
+ struct list_head sqd_list;
+
+ unsigned int file_alloc_start;
+ unsigned int file_alloc_end;
+
+ struct xarray personalities;
+ u32 pers_next;
+
+ struct list_head io_buffers_cache;
+
/* Keep this last, we don't need it for the fast path */
struct wait_queue_head poll_wq;
struct io_restriction restrictions;
@@ -374,6 +370,15 @@ struct io_ring_ctx {
unsigned sq_thread_idle;
/* protected by ->completion_lock */
unsigned evfd_last_cq_tail;
+
+ /*
+ * If IORING_SETUP_NO_MMAP is used, then the below holds
+ * the gup'ed pages for the two rings, and the sqes.
+ */
+ unsigned short n_ring_pages;
+ unsigned short n_sqe_pages;
+ struct page **ring_pages;
+ struct page **sqe_pages;
};
struct io_tw_state {
@@ -409,7 +414,6 @@ enum {
REQ_F_SINGLE_POLL_BIT,
REQ_F_DOUBLE_POLL_BIT,
REQ_F_PARTIAL_IO_BIT,
- REQ_F_CQE32_INIT_BIT,
REQ_F_APOLL_MULTISHOT_BIT,
REQ_F_CLEAR_POLLIN_BIT,
REQ_F_HASH_LOCKED_BIT,
@@ -479,8 +483,6 @@ enum {
REQ_F_PARTIAL_IO = BIT(REQ_F_PARTIAL_IO_BIT),
/* fast poll multishot mode */
REQ_F_APOLL_MULTISHOT = BIT(REQ_F_APOLL_MULTISHOT_BIT),
- /* ->extra1 and ->extra2 are initialised */
- REQ_F_CQE32_INIT = BIT(REQ_F_CQE32_INIT_BIT),
/* recvmsg special flag, clear EPOLLIN */
REQ_F_CLEAR_POLLIN = BIT(REQ_F_CLEAR_POLLIN_BIT),
/* hashed into ->cancel_hash_locked, protected by ->uring_lock */
@@ -579,13 +581,7 @@ struct io_kiocb {
struct io_task_work io_task_work;
unsigned nr_tw;
/* for polled requests, i.e. IORING_OP_POLL_ADD and async armed poll */
- union {
- struct hlist_node hash_node;
- struct {
- u64 extra1;
- u64 extra2;
- };
- };
+ struct hlist_node hash_node;
/* internal polling, see IORING_FEAT_FAST_POLL */
struct async_poll *apoll;
/* opcode allocated if it needs to store data for async defer */
@@ -595,6 +591,11 @@ struct io_kiocb {
/* custom credentials, valid IFF REQ_F_CREDS is set */
const struct cred *creds;
struct io_wq_work work;
+
+ struct {
+ u64 extra1;
+ u64 extra2;
+ } big_cqe;
};
struct io_overflow_cqe {
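
The field regrouping above follows the reasoning in the new "task_work and async notification delivery cacheline" comment: fields written by different CPUs get their own ____cacheline_aligned_in_smp section so one side's stores do not keep invalidating the other side's line. A minimal standalone illustration of the layout idea (plain GNU C; the 64-byte line size and all field names are assumptions, not io_uring's):

/* Group per-side hot fields so each side owns a whole cacheline. */
struct demo_ctx {
        struct {
                unsigned long submitted;        /* written on the submission side */
                unsigned long sq_flags;
        } sub __attribute__((aligned(64)));

        struct {
                unsigned long completed;        /* written from task_work/other CPUs */
                unsigned long cq_flags;
        } comp __attribute__((aligned(64)));
};

/*
 * offsetof(struct demo_ctx, comp) == 64, so stores to .comp never dirty the
 * cacheline holding .sub and the submitter's cached copy of .sub stays valid.
 */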
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index fdc6e64f49d6..96dd0acbba44 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -58,7 +58,11 @@ struct vm_fault;
#define IOMAP_F_DIRTY (1U << 1)
#define IOMAP_F_SHARED (1U << 2)
#define IOMAP_F_MERGED (1U << 3)
+#ifdef CONFIG_BUFFER_HEAD
#define IOMAP_F_BUFFER_HEAD (1U << 4)
+#else
+#define IOMAP_F_BUFFER_HEAD 0
+#endif /* CONFIG_BUFFER_HEAD */
#define IOMAP_F_XATTR (1U << 5)
/*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index d31642596675..c50a769d569a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -196,6 +196,8 @@ enum iommu_dev_features {
IOMMU_DEV_FEAT_IOPF,
};
+#define IOMMU_NO_PASID (0U) /* Reserved for DMA w/o PASID */
+#define IOMMU_FIRST_GLOBAL_PASID (1U) /* starting range for allocation */
#define IOMMU_PASID_INVALID (-1U)
typedef unsigned int ioasid_t;
@@ -228,6 +230,10 @@ struct iommu_iotlb_gather {
/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
+ * @hw_info: report iommu hardware information. The data buffer returned by this
+ * op is allocated in the iommu driver and freed by the caller after
+ * use. The information type is one of enum iommu_hw_info_type defined
+ * in include/uapi/linux/iommufd.h.
* @domain_alloc: allocate iommu domain
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
@@ -257,6 +263,7 @@ struct iommu_iotlb_gather {
*/
struct iommu_ops {
bool (*capable)(struct device *dev, enum iommu_cap);
+ void *(*hw_info)(struct device *dev, u32 *length, u32 *type);
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
@@ -409,6 +416,8 @@ struct iommu_fault_param {
* @priv: IOMMU Driver private data
* @max_pasids: number of PASIDs this device can consume
* @attach_deferred: the dma domain attachment is deferred
+ * @pci_32bit_workaround: Limit DMA allocations to 32-bit IOVAs
+ * @require_direct: device requires IOMMU_RESV_DIRECT regions
*
* TODO: migrate other per device data pointers under iommu_dev_data, e.g.
* struct iommu_group *iommu_group;
@@ -422,6 +431,8 @@ struct dev_iommu {
void *priv;
u32 max_pasids;
u32 attach_deferred:1;
+ u32 pci_32bit_workaround:1;
+ u32 require_direct:1;
};
int iommu_device_register(struct iommu_device *iommu,
@@ -450,17 +461,6 @@ static inline void iommu_iotlb_gather_init(struct iommu_iotlb_gather *gather)
};
}
-static inline const struct iommu_ops *dev_iommu_ops(struct device *dev)
-{
- /*
- * Assume that valid ops must be installed if iommu_probe_device()
- * has succeeded. The device ops are essentially for internal use
- * within the IOMMU subsystem itself, so we should be able to trust
- * ourselves not to misuse the helper.
- */
- return dev->iommu->iommu_dev->ops;
-}
-
extern int bus_iommu_probe(const struct bus_type *bus);
extern bool iommu_present(const struct bus_type *bus);
extern bool device_iommu_capable(struct device *dev, enum iommu_cap cap);
@@ -727,6 +727,8 @@ void iommu_detach_device_pasid(struct iommu_domain *domain,
struct iommu_domain *
iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
unsigned int type);
+ioasid_t iommu_alloc_global_pasid(struct device *dev);
+void iommu_free_global_pasid(ioasid_t pasid);
#else /* CONFIG_IOMMU_API */
struct iommu_ops {};
@@ -1088,6 +1090,13 @@ iommu_get_domain_for_dev_pasid(struct device *dev, ioasid_t pasid,
{
return NULL;
}
+
+static inline ioasid_t iommu_alloc_global_pasid(struct device *dev)
+{
+ return IOMMU_PASID_INVALID;
+}
+
+static inline void iommu_free_global_pasid(ioasid_t pasid) {}
#endif /* CONFIG_IOMMU_API */
/**
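
The new ->hw_info() kernel-doc above fixes the ownership rule: the driver allocates the buffer, the caller frees it after use. A hedged sketch of a driver-side implementation (the caps layout, the type value and the error convention are assumptions; only the op signature comes from this header):

struct mydrv_hw_caps {                  /* made-up per-driver info layout */
        u32 version;
        u32 features;
};

static void *mydrv_hw_info(struct device *dev, u32 *length, u32 *type)
{
        struct mydrv_hw_caps *caps;

        caps = kzalloc(sizeof(*caps), GFP_KERNEL);
        if (!caps)
                return ERR_PTR(-ENOMEM);        /* assuming an ERR_PTR error convention */

        caps->version = 1;                      /* whatever the hardware reports */
        caps->features = 0;
        *length = sizeof(*caps);
        *type = 0;      /* hypothetical enum iommu_hw_info_type value */

        return caps;    /* the caller copies this out and kfree()s it */
}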
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 1129a36a74c4..ffc3a949f837 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -16,14 +16,19 @@ struct page;
struct iommufd_ctx;
struct iommufd_access;
struct file;
+struct iommu_group;
struct iommufd_device *iommufd_device_bind(struct iommufd_ctx *ictx,
struct device *dev, u32 *id);
void iommufd_device_unbind(struct iommufd_device *idev);
int iommufd_device_attach(struct iommufd_device *idev, u32 *pt_id);
+int iommufd_device_replace(struct iommufd_device *idev, u32 *pt_id);
void iommufd_device_detach(struct iommufd_device *idev);
+struct iommufd_ctx *iommufd_device_to_ictx(struct iommufd_device *idev);
+u32 iommufd_device_to_id(struct iommufd_device *idev);
+
struct iommufd_access_ops {
u8 needs_pin_pages : 1;
void (*unmap)(void *data, unsigned long iova, unsigned long length);
@@ -44,12 +49,16 @@ iommufd_access_create(struct iommufd_ctx *ictx,
const struct iommufd_access_ops *ops, void *data, u32 *id);
void iommufd_access_destroy(struct iommufd_access *access);
int iommufd_access_attach(struct iommufd_access *access, u32 ioas_id);
+int iommufd_access_replace(struct iommufd_access *access, u32 ioas_id);
+void iommufd_access_detach(struct iommufd_access *access);
void iommufd_ctx_get(struct iommufd_ctx *ictx);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_ctx *iommufd_ctx_from_file(struct file *file);
+struct iommufd_ctx *iommufd_ctx_from_fd(int fd);
void iommufd_ctx_put(struct iommufd_ctx *ictx);
+bool iommufd_ctx_has_group(struct iommufd_ctx *ictx, struct iommu_group *group);
int iommufd_access_pin_pages(struct iommufd_access *access, unsigned long iova,
unsigned long length, struct page **out_pages,
diff --git a/include/linux/ioremap.h b/include/linux/ioremap.h
new file mode 100644
index 000000000000..f0e99fc7dd8b
--- /dev/null
+++ b/include/linux/ioremap.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IOREMAP_H
+#define _LINUX_IOREMAP_H
+
+#include <linux/kasan.h>
+#include <asm/pgtable.h>
+
+#if defined(CONFIG_HAS_IOMEM) || defined(CONFIG_GENERIC_IOREMAP)
+/*
+ * Ioremap often, but not always uses the generic vmalloc area. E.g on
+ * Power ARCH, it could have different ioremap space.
+ */
+#ifndef IOREMAP_START
+#define IOREMAP_START VMALLOC_START
+#define IOREMAP_END VMALLOC_END
+#endif
+static inline bool is_ioremap_addr(const void *x)
+{
+ unsigned long addr = (unsigned long)kasan_reset_tag(x);
+
+ return addr >= IOREMAP_START && addr < IOREMAP_END;
+}
+#else
+static inline bool is_ioremap_addr(const void *x)
+{
+ return false;
+}
+#endif
+
+#endif /* _LINUX_IOREMAP_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5883551b1ee8..af8a771a053c 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -147,6 +147,7 @@ struct inet6_skb_parm {
#define IP6SKB_JUMBOGRAM 128
#define IP6SKB_SEG6 256
#define IP6SKB_FAKEJUMBO 512
+#define IP6SKB_MULTIPATH 1024
};
#if defined(CONFIG_NET_L3_MASTER_DEV)
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 44c298aa58d4..52772c826c86 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -631,11 +631,6 @@ struct transaction_s
struct list_head t_inode_list;
/*
- * Protects info related to handles
- */
- spinlock_t t_handle_lock;
-
- /*
* Longest time some handle had to wait for running transaction
*/
unsigned long t_max_wait;
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 5e13f801c902..e0ae2a43e0eb 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -72,9 +72,15 @@ extern int register_refined_jiffies(long clock_tick_rate);
#endif
/*
- * The 64-bit value is not atomic - you MUST NOT read it
+ * The 64-bit value is not atomic on 32-bit systems - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
+ *
+ * jiffies and jiffies_64 are at the same address for little-endian systems
+ * and for 64-bit big-endian systems.
+ * On 32-bit big-endian systems, jiffies is the lower 32 bits of jiffies_64
+ * (i.e., at address @jiffies_64 + 4).
+ * See arch/ARCH/kernel/vmlinux.lds.S
*/
extern u64 __cacheline_aligned_in_smp jiffies_64;
extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffies;
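
The warning above is the classic torn-read problem: on 32-bit machines a 64-bit load takes two instructions, so the reader must retry if a tick updates jiffies_64 in between. A sketch of the seqlock read/write pattern the out-of-line get_jiffies_64() relies on (self-contained demo names; recent kernels actually split jiffies_lock into a raw spinlock plus a separate seqcount):

#include <linux/seqlock.h>

static DEFINE_SEQLOCK(demo_lock);
static u64 demo_counter;                /* stands in for jiffies_64 */

static u64 demo_read64(void)
{
        unsigned int seq;
        u64 ret;

        do {
                seq = read_seqbegin(&demo_lock);
                ret = demo_counter;     /* may be torn on 32-bit without the loop */
        } while (read_seqretry(&demo_lock, seq));

        return ret;
}

static void demo_tick(void)
{
        write_seqlock(&demo_lock);
        demo_counter++;                 /* the timer tick updates jiffies_64 like this */
        write_sequnlock(&demo_lock);
}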
@@ -82,6 +88,14 @@ extern unsigned long volatile __cacheline_aligned_in_smp __jiffy_arch_data jiffi
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
#else
+/**
+ * get_jiffies_64 - read the 64-bit non-atomic jiffies_64 value
+ *
+ * When BITS_PER_LONG < 64, this uses sequence number sampling using
+ * jiffies_lock to protect the 64-bit read.
+ *
+ * Return: current 64-bit jiffies value
+ */
static inline u64 get_jiffies_64(void)
{
return (u64)jiffies;
@@ -89,39 +103,76 @@ static inline u64 get_jiffies_64(void)
#endif
/*
- * These inlines deal with timer wrapping correctly. You are
- * strongly encouraged to use them
+ * These inlines deal with timer wrapping correctly. You are
+ * strongly encouraged to use them:
* 1. Because people otherwise forget
* 2. Because if the timer wrap changes in future you won't have to
* alter your driver code.
- *
- * time_after(a,b) returns true if the time a is after time b.
+ */
+
+/**
+ * time_after - returns true if the time a is after time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
*
* Do this with "<0" and ">=0" to only test the sign of the result. A
* good compiler would generate better code (and a really good compiler
* wouldn't care). Gcc is currently neither.
+ *
+ * Return: %true if time a is after time b, otherwise %false.
*/
#define time_after(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((b) - (a)) < 0))
+/**
+ * time_before - returns true if the time a is before time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before(a,b) time_after(b,a)
+/**
+ * time_after_eq - returns true if the time a is after or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq(a,b) \
(typecheck(unsigned long, a) && \
typecheck(unsigned long, b) && \
((long)((a) - (b)) >= 0))
+/**
+ * time_before_eq - returns true if the time a is before or the same as time b.
+ * @a: first comparable as unsigned long
+ * @b: second comparable as unsigned long
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq(a,b) time_after_eq(b,a)
-/*
- * Calculate whether a is in the range of [b, c].
+/**
+ * time_in_range - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
*/
#define time_in_range(a,b,c) \
(time_after_eq(a,b) && \
time_before_eq(a,c))
-/*
- * Calculate whether a is in the range of [b, c).
+/**
+ * time_in_range_open - Calculate whether a is in the range of [b, c).
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c), otherwise %false.
*/
#define time_in_range_open(a,b,c) \
(time_after_eq(a,b) && \
@@ -129,45 +180,138 @@ static inline u64 get_jiffies_64(void)
/* Same as above, but does so with platform independent 64bit types.
* These must be used when utilizing jiffies_64 (i.e. return value of
- * get_jiffies_64() */
+ * get_jiffies_64()). */
+
+/**
+ * time_after64 - returns true if the time a is after time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after time b, otherwise %false.
+ */
#define time_after64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((b) - (a)) < 0))
+/**
+ * time_before64 - returns true if the time a is before time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before time b, otherwise %false.
+ */
#define time_before64(a,b) time_after64(b,a)
+/**
+ * time_after_eq64 - returns true if the time a is after or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is after or the same as time b, otherwise %false.
+ */
#define time_after_eq64(a,b) \
(typecheck(__u64, a) && \
typecheck(__u64, b) && \
((__s64)((a) - (b)) >= 0))
+/**
+ * time_before_eq64 - returns true if the time a is before or the same as time b.
+ * @a: first comparable as __u64
+ * @b: second comparable as __u64
+ *
+ * This must be used when utilizing jiffies_64 (i.e. return value of
+ * get_jiffies_64()).
+ *
+ * Return: %true if time a is before or the same as time b, otherwise %false.
+ */
#define time_before_eq64(a,b) time_after_eq64(b,a)
+/**
+ * time_in_range64 - Calculate whether a is in the range of [b, c].
+ * @a: time to test
+ * @b: beginning of the range
+ * @c: end of the range
+ *
+ * Return: %true if time a is in the range [b, c], otherwise %false.
+ */
#define time_in_range64(a, b, c) \
(time_after_eq64(a, b) && \
time_before_eq64(a, c))
/*
- * These four macros compare jiffies and 'a' for convenience.
+ * These eight macros compare jiffies[_64] and 'a' for convenience.
*/
-/* time_is_before_jiffies(a) return true if a is before jiffies */
+/**
+ * time_is_before_jiffies - return true if a is before jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before jiffies, otherwise %false.
+ */
#define time_is_before_jiffies(a) time_after(jiffies, a)
+/**
+ * time_is_before_jiffies64 - return true if a is before jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before jiffies_64, otherwise %false.
+ */
#define time_is_before_jiffies64(a) time_after64(get_jiffies_64(), a)
-/* time_is_after_jiffies(a) return true if a is after jiffies */
+/**
+ * time_is_after_jiffies - return true if a is after jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after jiffies, otherwise %false.
+ */
#define time_is_after_jiffies(a) time_before(jiffies, a)
+/**
+ * time_is_after_jiffies64 - return true if a is after jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true is time a is after jiffies_64, otherwise %false.
+ */
#define time_is_after_jiffies64(a) time_before64(get_jiffies_64(), a)
-/* time_is_before_eq_jiffies(a) return true if a is before or equal to jiffies*/
+/**
+ * time_is_before_eq_jiffies - return true if a is before or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is before or the same as jiffies, otherwise %false.
+ */
#define time_is_before_eq_jiffies(a) time_after_eq(jiffies, a)
+/**
+ * time_is_before_eq_jiffies64 - return true if a is before or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true if time a is before or the same as jiffies_64, otherwise %false.
+ */
#define time_is_before_eq_jiffies64(a) time_after_eq64(get_jiffies_64(), a)
-/* time_is_after_eq_jiffies(a) return true if a is after or equal to jiffies*/
+/**
+ * time_is_after_eq_jiffies - return true if a is after or equal to jiffies
+ * @a: time (unsigned long) to compare to jiffies
+ *
+ * Return: %true if time a is after or the same as jiffies, otherwise %false.
+ */
#define time_is_after_eq_jiffies(a) time_before_eq(jiffies, a)
+/**
+ * time_is_after_eq_jiffies64 - return true if a is after or equal to jiffies_64
+ * @a: time (__u64) to compare to jiffies_64
+ *
+ * Return: %true is time a is after or the same as jiffies_64, otherwise %false.
+ */
#define time_is_after_eq_jiffies64(a) time_before_eq64(get_jiffies_64(), a)
/*
- * Have the 32 bit jiffies value wrap 5 minutes after boot
+ * Have the 32-bit jiffies value wrap 5 minutes after boot
* so jiffies wrap bugs show up earlier.
*/
#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
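
The signed-subtraction trick used by time_after() and friends is easy to check in isolation; a standalone userspace demo of why it keeps working across a counter wrap (the comparison is the one time_after() uses, minus the typecheck(); the numbers are arbitrary):

#include <assert.h>
#include <stdio.h>

/* Same comparison as time_after(): a is after b iff (long)((b) - (a)) < 0. */
#define demo_time_after(a, b)   ((long)((b) - (a)) < 0)

int main(void)
{
        unsigned long now = (unsigned long)-30; /* counter 30 ticks before wrapping */
        unsigned long timeout = now + 60;       /* wraps around to a small value */

        /* timeout is numerically tiny after the wrap, yet it still compares
         * as lying in the future relative to now. */
        assert(!demo_time_after(now, timeout));
        assert(demo_time_after(timeout, now));

        printf("now=%lu timeout=%lu\n", now, timeout);
        return 0;
}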
@@ -278,7 +422,7 @@ extern unsigned long preset_lpj;
#if BITS_PER_LONG < 64
# define MAX_SEC_IN_JIFFIES \
(long)((u64)((u64)MAX_JIFFY_OFFSET * TICK_NSEC) / NSEC_PER_SEC)
-#else /* take care of overflow on 64 bits machines */
+#else /* take care of overflow on 64-bit machines */
# define MAX_SEC_IN_JIFFIES \
(SH_DIV((MAX_JIFFY_OFFSET >> SEC_JIFFIE_SC) * TICK_NSEC, NSEC_PER_SEC, 1) - 1)
@@ -290,6 +434,12 @@ extern unsigned long preset_lpj;
extern unsigned int jiffies_to_msecs(const unsigned long j);
extern unsigned int jiffies_to_usecs(const unsigned long j);
+/**
+ * jiffies_to_nsecs - Convert jiffies to nanoseconds
+ * @j: jiffies value
+ *
+ * Return: nanoseconds value
+ */
static inline u64 jiffies_to_nsecs(const unsigned long j)
{
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
@@ -353,12 +503,14 @@ static inline unsigned long _msecs_to_jiffies(const unsigned int m)
*
* msecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __msecs_to_jiffies() is called if the value passed does not
+ * code. __msecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * The HZ range specific helpers _msecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
@@ -400,12 +552,14 @@ static inline unsigned long _usecs_to_jiffies(const unsigned int u)
*
* usecs_to_jiffies() checks for the passed in value being a constant
* via __builtin_constant_p() allowing gcc to eliminate most of the
- * code, __usecs_to_jiffies() is called if the value passed does not
+ * code. __usecs_to_jiffies() is called if the value passed does not
* allow constant folding and the actual conversion must be done at
* runtime.
- * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * The HZ range specific helpers _usecs_to_jiffies() are called both
* directly here and from __msecs_to_jiffies() in the case where
* constant folding is not possible.
+ *
+ * Return: jiffies value
*/
static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
{
@@ -422,6 +576,7 @@ extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
extern clock_t jiffies_to_clock_t(unsigned long x);
+
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
return jiffies_to_clock_t(max(0L, delta));
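
The msecs_to_jiffies()/usecs_to_jiffies() kernel-doc above describes a dispatch on __builtin_constant_p(): constant arguments are folded at compile time, everything else goes to the out-of-line helper. A standalone sketch of the same pattern (illustrative names, HZ hard-coded to 1000 for the demo):

#include <stdio.h>

#define DEMO_HZ         1000
#define DEMO_MAX_JIFFY  0x7ffffffeUL

static unsigned long __demo_msecs_to_jiffies(unsigned int m)
{
        /* Out-of-line path: the range check happens at run time. */
        if (m > DEMO_MAX_JIFFY)
                return DEMO_MAX_JIFFY;
        return m * (DEMO_HZ / 1000);
}

static inline unsigned long demo_msecs_to_jiffies(unsigned int m)
{
        if (__builtin_constant_p(m)) {
                /* Constant argument: the compiler folds this whole branch
                 * down to a literal, no call is emitted. */
                if (m > DEMO_MAX_JIFFY)
                        return DEMO_MAX_JIFFY;
                return m * (DEMO_HZ / 1000);
        }
        /* Non-constant argument: do the conversion out of line. */
        return __demo_msecs_to_jiffies(m);
}

int main(void)
{
        unsigned int runtime = 250;

        printf("%lu %lu\n", demo_msecs_to_jiffies(100), /* folded to 100 */
               demo_msecs_to_jiffies(runtime));         /* computed at run time */
        return 0;
}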
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 819b6bc8ac08..3df5499f7936 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 0d91e0af0125..cee8fe87e9f4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -29,6 +29,7 @@
#include <linux/panic.h>
#include <linux/printk.h>
#include <linux/build_bug.h>
+#include <linux/sprintf.h>
#include <linux/static_call_types.h>
#include <linux/instruction_pointer.h>
#include <asm/byteorder.h>
@@ -203,35 +204,6 @@ static inline void might_fault(void) { }
void do_exit(long error_code) __noreturn;
-extern int num_to_str(char *buf, int size,
- unsigned long long num, unsigned int width);
-
-/* lib/printf utilities */
-
-extern __printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
-extern __printf(2, 0) int vsprintf(char *buf, const char *, va_list);
-extern __printf(3, 4)
-int snprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(3, 4)
-int scnprintf(char *buf, size_t size, const char *fmt, ...);
-extern __printf(3, 0)
-int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
-extern __printf(2, 3) __malloc
-char *kasprintf(gfp_t gfp, const char *fmt, ...);
-extern __printf(2, 0) __malloc
-char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
-extern __printf(2, 0)
-const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
-
-extern __scanf(2, 3)
-int sscanf(const char *, const char *, ...);
-extern __scanf(2, 0)
-int vsscanf(const char *, const char *, va_list);
-
-extern int no_hash_pointers_enable(char *str);
-
extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
@@ -457,13 +429,6 @@ ftrace_vprintk(const char *fmt, va_list ap)
static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { }
#endif /* CONFIG_TRACING */
-/* This counts to 12. Any more, it will return 13th argument. */
-#define __COUNT_ARGS(_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _n, X...) _n
-#define COUNT_ARGS(X...) __COUNT_ARGS(, ##X, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
-
-#define __CONCAT(a, b) a ## b
-#define CONCATENATE(a, b) __CONCAT(a, b)
-
/* Rebuild everything on CONFIG_FTRACE_MCOUNT_RECORD */
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
# define REBUILD_DUE_TO_FTRACE_MCOUNT_RECORD
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 73f5c120def8..2a36f3218b51 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -550,6 +550,10 @@ static inline int kernfs_setattr(struct kernfs_node *kn,
const struct iattr *iattr)
{ return -ENOSYS; }
+static inline __poll_t kernfs_generic_poll(struct kernfs_open_file *of,
+ struct poll_table_struct *pt)
+{ return -ENOSYS; }
+
static inline void kernfs_notify(struct kernfs_node *kn) { }
static inline int kernfs_xattr_get(struct kernfs_node *kn, const char *name,
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 22b5cd24f581..32c78078552c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -33,6 +33,7 @@ extern note_buf_t __percpu *crash_notes;
#include <linux/compat.h>
#include <linux/ioport.h>
#include <linux/module.h>
+#include <linux/highmem.h>
#include <asm/kexec.h>
/* Verify architecture specific macros are defined */
@@ -230,21 +231,6 @@ static inline int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
}
#endif
-/* Alignment required for elf header segment */
-#define ELF_CORE_HEADER_ALIGN 4096
-
-struct crash_mem {
- unsigned int max_nr_ranges;
- unsigned int nr_ranges;
- struct range ranges[];
-};
-
-extern int crash_exclude_mem_range(struct crash_mem *mem,
- unsigned long long mstart,
- unsigned long long mend);
-extern int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
- void **addr, unsigned long *sz);
-
#ifndef arch_kexec_apply_relocations_add
/*
* arch_kexec_apply_relocations_add - apply relocations of type RELA
@@ -334,6 +320,10 @@ struct kimage {
unsigned int preserve_context : 1;
/* If set, we are using file mode kexec syscall */
unsigned int file_mode:1;
+#ifdef CONFIG_CRASH_HOTPLUG
+ /* If set, allow changes to elfcorehdr of kexec_load'd image */
+ unsigned int update_elfcorehdr:1;
+#endif
#ifdef ARCH_HAS_KIMAGE_ARCH
struct kimage_arch arch;
@@ -360,6 +350,12 @@ struct kimage {
struct purgatory_info purgatory_info;
#endif
+#ifdef CONFIG_CRASH_HOTPLUG
+ int hp_action;
+ int elfcorehdr_index;
+ bool elfcorehdr_updated;
+#endif
+
#ifdef CONFIG_IMA_KEXEC
/* Virtual address of IMA measurement buffer for kexec syscall */
void *ima_buffer;
@@ -404,9 +400,9 @@ bool kexec_load_permitted(int kexec_image_type);
/* List of defined/legal kexec flags */
#ifndef CONFIG_KEXEC_JUMP
-#define KEXEC_FLAGS KEXEC_ON_CRASH
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_UPDATE_ELFCOREHDR)
#else
-#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT)
+#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT | KEXEC_UPDATE_ELFCOREHDR)
#endif
/* List of defined/legal kexec file flags */
@@ -490,6 +486,24 @@ static inline int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages, g
static inline void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages) { }
#endif
+#ifndef arch_crash_handle_hotplug_event
+static inline void arch_crash_handle_hotplug_event(struct kimage *image) { }
+#endif
+
+int crash_check_update_elfcorehdr(void);
+
+#ifndef crash_hotplug_cpu_support
+static inline int crash_hotplug_cpu_support(void) { return 0; }
+#endif
+
+#ifndef crash_hotplug_memory_support
+static inline int crash_hotplug_memory_support(void) { return 0; }
+#endif
+
+#ifndef crash_get_elfcorehdr_size
+static inline unsigned int crash_get_elfcorehdr_size(void) { return 0; }
+#endif
+
#else /* !CONFIG_KEXEC_CORE */
struct pt_regs;
struct task_struct;
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 726857a4b680..401af4757514 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -59,15 +59,16 @@ static __always_inline bool is_kfence_address(const void *addr)
}
/**
- * kfence_alloc_pool() - allocate the KFENCE pool via memblock
+ * kfence_alloc_pool_and_metadata() - allocate the KFENCE pool and KFENCE
+ * metadata via memblock
*/
-void __init kfence_alloc_pool(void);
+void __init kfence_alloc_pool_and_metadata(void);
/**
* kfence_init() - perform KFENCE initialization at boot time
*
- * Requires that kfence_alloc_pool() was called before. This sets up the
- * allocation gate timer, and requires that workqueues are available.
+ * Requires that kfence_alloc_pool_and_metadata() was called before. This sets
+ * up the allocation gate timer, and requires that workqueues are available.
*/
void __init kfence_init(void);
@@ -223,7 +224,7 @@ bool __kfence_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *sla
#else /* CONFIG_KFENCE */
static inline bool is_kfence_address(const void *addr) { return false; }
-static inline void kfence_alloc_pool(void) { }
+static inline void kfence_alloc_pool_and_metadata(void) { }
static inline void kfence_init(void) { }
static inline void kfence_shutdown_cache(struct kmem_cache *s) { }
static inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags) { return NULL; }
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index c392c811d9ad..c30affcc43b4 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -69,14 +69,16 @@ struct kobject {
const struct kobj_type *ktype;
struct kernfs_node *sd; /* sysfs directory entry */
struct kref kref;
-#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
- struct delayed_work release;
-#endif
+
unsigned int state_initialized:1;
unsigned int state_in_sysfs:1;
unsigned int state_add_uevent_sent:1;
unsigned int state_remove_uevent_sent:1;
unsigned int uevent_suppress:1;
+
+#ifdef CONFIG_DEBUG_KOBJECT_RELEASE
+ struct delayed_work release;
+#endif
};
__printf(2, 3) int kobject_set_name(struct kobject *kobj, const char *name, ...);
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 899a314bc487..c2dd786a30e1 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -26,6 +26,22 @@ int ksm_disable(struct mm_struct *mm);
int __ksm_enter(struct mm_struct *mm);
void __ksm_exit(struct mm_struct *mm);
+/*
+ * To identify zeropages that were mapped by KSM, we reuse the dirty bit
+ * in the PTE. If the PTE is dirty, the zeropage was mapped by KSM when
+ * deduplicating memory.
+ */
+#define is_ksm_zero_pte(pte) (is_zero_pfn(pte_pfn(pte)) && pte_dirty(pte))
+
+extern unsigned long ksm_zero_pages;
+
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+ if (is_ksm_zero_pte(pte)) {
+ ksm_zero_pages--;
+ mm->ksm_zero_pages--;
+ }
+}
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
{
@@ -95,6 +111,10 @@ static inline void ksm_exit(struct mm_struct *mm)
{
}
+static inline void ksm_might_unmap_zero_page(struct mm_struct *mm, pte_t pte)
+{
+}
+
#ifdef CONFIG_MEMORY_FAILURE
static inline void collect_procs_ksm(struct page *page,
struct list_head *to_kill, int force_early)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index f1f95a71a4bc..2c30ade43bc8 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -88,7 +88,6 @@ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
-bool __kthread_should_park(struct task_struct *k);
bool kthread_should_stop_or_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);
void *kthread_func(struct task_struct *k);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 9d3ac7720da9..fb6c6109fdca 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -190,8 +190,6 @@ bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
struct kvm_vcpu *except);
-bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
- unsigned long *vcpu_bitmap);
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -256,11 +254,15 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
+union kvm_mmu_notifier_arg {
+ pte_t pte;
+};
+
struct kvm_gfn_range {
struct kvm_memory_slot *slot;
gfn_t start;
gfn_t end;
- pte_t pte;
+ union kvm_mmu_notifier_arg arg;
bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
@@ -865,6 +867,25 @@ static inline void kvm_vm_bugged(struct kvm *kvm)
unlikely(__ret); \
})
+/*
+ * Note, "data corruption" refers to corruption of host kernel data structures,
+ * not guest data. Guest data corruption, suspected or confirmed, that is tied
+ * and contained to a single VM should *never* BUG() and potentially panic the
+ * host, i.e. use this variant of KVM_BUG() if and only if a KVM data structure
+ * is corrupted and that corruption can have a cascading effect to other parts
+ * of the hosts and/or to other VMs.
+ */
+#define KVM_BUG_ON_DATA_CORRUPTION(cond, kvm) \
+({ \
+ bool __ret = !!(cond); \
+ \
+ if (IS_ENABLED(CONFIG_BUG_ON_DATA_CORRUPTION)) \
+ BUG_ON(__ret); \
+ else if (WARN_ON_ONCE(__ret && !(kvm)->vm_bugged)) \
+ kvm_vm_bugged(kvm); \
+ unlikely(__ret); \
+})
+
static inline void kvm_vcpu_srcu_read_lock(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PROVE_RCU
@@ -1359,6 +1380,9 @@ int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool yield_to_kernel_mode);
void kvm_flush_remote_tlbs(struct kvm *kvm);
+void kvm_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
+void kvm_flush_remote_tlbs_memslot(struct kvm *kvm,
+ const struct kvm_memory_slot *memslot);
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
@@ -1387,10 +1411,7 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);
-#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
-void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
- const struct kvm_memory_slot *memslot);
-#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
+#ifndef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
int *is_dirty, struct kvm_memory_slot **memslot);
@@ -1479,11 +1500,23 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
-#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
-static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
+static inline int kvm_arch_flush_remote_tlbs(struct kvm *kvm)
{
return -ENOTSUPP;
}
+#else
+int kvm_arch_flush_remote_tlbs(struct kvm *kvm);
+#endif
+
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
+static inline int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm,
+ gfn_t gfn, u64 nr_pages)
+{
+ return -EOPNOTSUPP;
+}
+#else
+int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, gfn_t gfn, u64 nr_pages);
#endif
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
@@ -2148,8 +2181,6 @@ struct kvm_device_ops {
int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};
-void kvm_device_get(struct kvm_device *dev);
-void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
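
A short hedged sketch of how the new KVM_BUG_ON_DATA_CORRUPTION() is meant to be used at a point where a host-side KVM structure is found inconsistent (the helper and list are hypothetical; only the macro comes from this header):

/* Hypothetical consistency check inside a KVM helper. */
static void mykvm_commit_update(struct kvm *kvm, struct list_head *pending)
{
        /*
         * Host bookkeeping must be empty here. If it is not, KVM's own data
         * is corrupt: BUG() when CONFIG_BUG_ON_DATA_CORRUPTION is set,
         * otherwise WARN once and mark the VM bugged.
         */
        KVM_BUG_ON_DATA_CORRUPTION(!list_empty(pending), kvm);
}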
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 7d428100b42b..aa16dc2a8230 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -100,6 +100,7 @@ struct led_classdev {
const char *name;
unsigned int brightness;
unsigned int max_brightness;
+ unsigned int color;
int flags;
/* Lower 16 bits reflect status */
@@ -313,6 +314,8 @@ extern struct led_classdev *of_led_get(struct device_node *np, int index);
extern void led_put(struct led_classdev *led_cdev);
struct led_classdev *__must_check devm_of_led_get(struct device *dev,
int index);
+struct led_classdev *__must_check devm_of_led_get_optional(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 820f7a3a2749..bf4913f4d7ac 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -222,6 +222,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -344,7 +348,6 @@ enum {
ATA_LINK_RESUME_TRIES = 5,
/* how hard are we gonna try to probe/recover devices */
- ATA_PROBE_MAX_TRIES = 3,
ATA_EH_DEV_TRIES = 3,
ATA_EH_PMP_TRIES = 5,
ATA_EH_PMP_LINK_TRIES = 3,
@@ -977,12 +980,6 @@ struct ata_port_operations {
ssize_t size);
/*
- * Obsolete
- */
- void (*phy_reset)(struct ata_port *ap);
- void (*eng_timeout)(struct ata_port *ap);
-
- /*
* ->inherits must be the last field and all the preceding
* fields must be pointers.
*/
@@ -1116,7 +1113,7 @@ static inline void ata_sas_port_resume(struct ata_port *ap)
extern int ata_ratelimit(void);
extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
- u32 val, unsigned long interval, unsigned long timeout);
+ u32 val, unsigned int interval, unsigned int timeout);
extern int atapi_cmd_type(u8 opcode);
extern unsigned int ata_pack_xfermask(unsigned int pio_mask,
unsigned int mwdma_mask,
@@ -1166,11 +1163,11 @@ extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *
* SATA specific code - drivers/ata/libata-sata.c
*/
#ifdef CONFIG_SATA_HOST
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
+extern const unsigned int sata_deb_timing_normal[];
+extern const unsigned int sata_deb_timing_hotplug[];
+extern const unsigned int sata_deb_timing_long[];
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
@@ -1185,14 +1182,14 @@ extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern int sata_set_spd(struct ata_link *link);
extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
+ const unsigned int *timing, unsigned long deadline,
bool *online, int (*check_ready)(struct ata_link *));
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+extern int sata_link_resume(struct ata_link *link, const unsigned int *params,
unsigned long deadline);
extern int ata_eh_read_sense_success_ncq_log(struct ata_link *link);
extern void ata_eh_analyze_ncq_error(struct ata_link *link);
#else
-static inline const unsigned long *
+static inline const unsigned int *
sata_ehc_deb_timing(struct ata_eh_context *ehc)
{
return NULL;
@@ -1212,7 +1209,7 @@ static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
}
static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
static inline int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing,
+ const unsigned int *timing,
unsigned long deadline,
bool *online,
int (*check_ready)(struct ata_link *))
@@ -1222,7 +1219,7 @@ static inline int sata_link_hardreset(struct ata_link *link,
return -EOPNOTSUPP;
}
static inline int sata_link_resume(struct ata_link *link,
- const unsigned long *params,
+ const unsigned int *params,
unsigned long deadline)
{
return -EOPNOTSUPP;
@@ -1234,20 +1231,15 @@ static inline int ata_eh_read_sense_success_ncq_log(struct ata_link *link)
static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
#endif
extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
+ const unsigned int *params, unsigned long deadline);
extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
bool spm_wakeup);
extern int ata_slave_link_init(struct ata_port *ap);
-extern void ata_sas_port_destroy(struct ata_port *);
extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
+extern void ata_port_probe(struct ata_port *ap);
extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
extern void ata_tf_to_fis(const struct ata_taskfile *tf,
@@ -1785,7 +1777,7 @@ static inline struct ata_queued_cmd *ata_qc_from_tag(struct ata_port *ap,
{
struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
- if (unlikely(!qc) || !ap->ops->error_handler)
+ if (unlikely(!qc))
return qc;
if ((qc->flags & (ATA_QCFLAG_ACTIVE |
@@ -1876,7 +1868,7 @@ static inline int ata_check_ready(u8 status)
}
static inline unsigned long ata_deadline(unsigned long from_jiffies,
- unsigned long timeout_msecs)
+ unsigned int timeout_msecs)
{
return from_jiffies + msecs_to_jiffies(timeout_msecs);
}
diff --git a/include/linux/limits.h b/include/linux/limits.h
index f6bcc9369010..38eb7f6f7e88 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -10,6 +10,8 @@
#define SSIZE_MAX ((ssize_t)(SIZE_MAX >> 1))
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
+#define RESOURCE_SIZE_MAX ((resource_size_t)~0)
+
#define U8_MAX ((u8)~0U)
#define S8_MAX ((s8)(U8_MAX >> 1))
#define S8_MIN ((s8)(-S8_MAX - 1))
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index f42594a9efe0..0f016d69c996 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -204,6 +204,8 @@ extern unsigned long nlmsvc_timeout;
extern bool nsm_use_hostnames;
extern u32 nsm_local_state;
+extern struct timer_list nlmsvc_retry;
+
/*
* Lockd client functions
*/
@@ -280,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-unsigned long nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(void);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index f5b7352afaac..ac962c4cb44b 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -32,11 +32,11 @@ LSM_HOOK(int, 0, binder_transaction, const struct cred *from,
LSM_HOOK(int, 0, binder_transfer_binder, const struct cred *from,
const struct cred *to)
LSM_HOOK(int, 0, binder_transfer_file, const struct cred *from,
- const struct cred *to, struct file *file)
+ const struct cred *to, const struct file *file)
LSM_HOOK(int, 0, ptrace_access_check, struct task_struct *child,
unsigned int mode)
LSM_HOOK(int, 0, ptrace_traceme, struct task_struct *parent)
-LSM_HOOK(int, 0, capget, struct task_struct *target, kernel_cap_t *effective,
+LSM_HOOK(int, 0, capget, const struct task_struct *target, kernel_cap_t *effective,
kernel_cap_t *inheritable, kernel_cap_t *permitted)
LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
const kernel_cap_t *effective, const kernel_cap_t *inheritable,
@@ -112,9 +112,9 @@ LSM_HOOK(int, 0, path_notify, const struct path *path, u64 mask,
unsigned int obj_type)
LSM_HOOK(int, 0, inode_alloc_security, struct inode *inode)
LSM_HOOK(void, LSM_RET_VOID, inode_free_security, struct inode *inode)
-LSM_HOOK(int, 0, inode_init_security, struct inode *inode,
- struct inode *dir, const struct qstr *qstr, const char **name,
- void **value, size_t *len)
+LSM_HOOK(int, -EOPNOTSUPP, inode_init_security, struct inode *inode,
+ struct inode *dir, const struct qstr *qstr, struct xattr *xattrs,
+ int *xattr_count)
LSM_HOOK(int, 0, inode_init_security_anon, struct inode *inode,
const struct qstr *name, const struct inode *context_inode)
LSM_HOOK(int, 0, inode_create, struct inode *dir, struct dentry *dentry,
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index ab2b2fafa4a4..dcb5e5b5eb13 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -28,6 +28,7 @@
#include <linux/security.h>
#include <linux/init.h>
#include <linux/rculist.h>
+#include <linux/xattr.h>
union security_list_options {
#define LSM_HOOK(RET, DEFAULT, NAME, ...) RET (*NAME)(__VA_ARGS__);
@@ -63,8 +64,27 @@ struct lsm_blob_sizes {
int lbs_ipc;
int lbs_msg_msg;
int lbs_task;
+ int lbs_xattr_count; /* number of xattr slots in new_xattrs array */
};
+/**
+ * lsm_get_xattr_slot - Return the next available slot and increment the index
+ * @xattrs: array storing LSM-provided xattrs
+ * @xattr_count: number of already stored xattrs (updated)
+ *
+ * Retrieve the first available slot in the @xattrs array to fill with an xattr,
+ * and increment @xattr_count.
+ *
+ * Return: The slot to fill in @xattrs if non-NULL, NULL otherwise.
+ */
+static inline struct xattr *lsm_get_xattr_slot(struct xattr *xattrs,
+ int *xattr_count)
+{
+ if (unlikely(!xattrs))
+ return NULL;
+ return &xattrs[(*xattr_count)++];
+}
+
/*
* LSM_RET_VOID is used as the default value in LSM_HOOK definitions for void
* LSM hooks (in include/linux/lsm_hook_defs.h).
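
The lsm_get_xattr_slot() kernel-doc above implies a calling pattern for the reworked inode_init_security hook; a hedged sketch of an LSM filling one of its reserved slots (the LSM name, xattr suffix and label value are placeholders; the hook signature and struct xattr fields come from the headers):

static int mylsm_inode_init_security(struct inode *inode, struct inode *dir,
                                     const struct qstr *qstr,
                                     struct xattr *xattrs, int *xattr_count)
{
        struct xattr *xattr = lsm_get_xattr_slot(xattrs, xattr_count);

        /* Callers may pass xattrs == NULL when no xattr should be recorded. */
        if (!xattr)
                return 0;

        xattr->name = "mylsm";                          /* placeholder suffix */
        xattr->value = kstrdup("mylsm_label_v1", GFP_NOFS);
        if (!xattr->value)
                return -ENOMEM;
        xattr->value_len = strlen("mylsm_label_v1") + 1;
        return 0;
}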
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index 295548cca8b3..e41c70ac7744 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -29,14 +29,12 @@
#define MAPLE_NODE_SLOTS 31 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 16 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 10 /* 240 bytes */
-#define MAPLE_ARANGE64_META_MAX 15 /* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 1)
#else
/* 32bit sizes */
#define MAPLE_NODE_SLOTS 63 /* 256 bytes including ->parent */
#define MAPLE_RANGE64_SLOTS 32 /* 256 bytes */
#define MAPLE_ARANGE64_SLOTS 21 /* 240 bytes */
-#define MAPLE_ARANGE64_META_MAX 31 /* Out of range for metadata */
#define MAPLE_ALLOC_SLOTS (MAPLE_NODE_SLOTS - 2)
#endif /* defined(CONFIG_64BIT) || defined(BUILD_VDSO32_64) */
@@ -184,13 +182,23 @@ enum maple_type {
#ifdef CONFIG_LOCKDEP
typedef struct lockdep_map *lockdep_map_p;
-#define mt_lock_is_held(mt) lock_is_held(mt->ma_external_lock)
+#define mt_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || lock_is_held((mt)->ma_external_lock))
+
+#define mt_write_lock_is_held(mt) \
+ (!(mt)->ma_external_lock || \
+ lock_is_held_type((mt)->ma_external_lock, 0))
+
#define mt_set_external_lock(mt, lock) \
(mt)->ma_external_lock = &(lock)->dep_map
+
+#define mt_on_stack(mt) (mt).ma_external_lock = NULL
#else
typedef struct { /* nothing */ } lockdep_map_p;
-#define mt_lock_is_held(mt) 1
+#define mt_lock_is_held(mt) 1
+#define mt_write_lock_is_held(mt) 1
#define mt_set_external_lock(mt, lock) do { } while (0)
+#define mt_on_stack(mt) do { } while (0)
#endif
/*
@@ -212,8 +220,8 @@ struct maple_tree {
spinlock_t ma_lock;
lockdep_map_p ma_external_lock;
};
- void __rcu *ma_root;
unsigned int ma_flags;
+ void __rcu *ma_root;
};
/**
@@ -458,7 +466,7 @@ void *mas_find(struct ma_state *mas, unsigned long max);
void *mas_find_range(struct ma_state *mas, unsigned long max);
void *mas_find_rev(struct ma_state *mas, unsigned long min);
void *mas_find_range_rev(struct ma_state *mas, unsigned long max);
-int mas_preallocate(struct ma_state *mas, gfp_t gfp);
+int mas_preallocate(struct ma_state *mas, void *entry, gfp_t gfp);
bool mas_is_err(struct ma_state *mas);
bool mas_nomem(struct ma_state *mas, gfp_t gfp);
@@ -531,6 +539,22 @@ static inline void mas_reset(struct ma_state *mas)
*/
#define mas_for_each(__mas, __entry, __max) \
while (((__entry) = mas_find((__mas), (__max))) != NULL)
+/**
+ * __mas_set_range() - Set up Maple Tree operation state to a sub-range of the
+ * current location.
+ * @mas: Maple Tree operation state.
+ * @start: New start of range in the Maple Tree.
+ * @last: New end of range in the Maple Tree.
+ *
+ * Set the internal maple state values to a sub-range.
+ * Please use mas_set_range() if you do not know where you are in the tree.
+ */
+static inline void __mas_set_range(struct ma_state *mas, unsigned long start,
+ unsigned long last)
+{
+ mas->index = start;
+ mas->last = last;
+}
/**
* mas_set_range() - Set up Maple Tree operation state for a different index.
@@ -545,9 +569,8 @@ static inline void mas_reset(struct ma_state *mas)
static inline
void mas_set_range(struct ma_state *mas, unsigned long start, unsigned long last)
{
- mas->index = start;
- mas->last = last;
- mas->node = MAS_START;
+ __mas_set_range(mas, start, last);
+ mas->node = MAS_START;
}
/**
@@ -662,10 +685,11 @@ void *mt_next(struct maple_tree *mt, unsigned long index, unsigned long max);
* mt_for_each - Iterate over each entry starting at index until max.
* @__tree: The Maple Tree
* @__entry: The current entry
- * @__index: The index to update to track the location in the tree
+ * @__index: The index to start the search from. Subsequently used as iterator.
* @__max: The maximum limit for @index
*
- * Note: Will not return the zero entry.
+ * This iterator skips all entries which resolve to a NULL pointer,
+ * e.g. entries which have been reserved with XA_ZERO_ENTRY.
*/
#define mt_for_each(__tree, __entry, __index, __max) \
for (__entry = mt_find(__tree, &(__index), __max); \
diff --git a/include/linux/math.h b/include/linux/math.h
index 2d388650c556..dd4152711de7 100644
--- a/include/linux/math.h
+++ b/include/linux/math.h
@@ -156,6 +156,25 @@ __STRUCT_FRACT(u32)
({ signed type __x = (x); __x < 0 ? -__x : __x; }), other)
/**
+ * abs_diff - return absolute value of the difference between the arguments
+ * @a: the first argument
+ * @b: the second argument
+ *
+ * @a and @b have to be of the same type. With this restriction we compare
+ * signed to signed and unsigned to unsigned. The result is the subtraction of
+ * the smaller of the two from the bigger, hence the result is always a
+ * positive value.
+ *
+ * Return: the absolute value of the difference between @a and @b.
+ */
+#define abs_diff(a, b) ({ \
+ typeof(a) __a = (a); \
+ typeof(b) __b = (b); \
+ (void)(&__a == &__b); \
+ __a > __b ? (__a - __b) : (__b - __a); \
+})
+
+/**
* reciprocal_scale - "scale" a value into range [0, ep_ro)
* @val: value
* @ep_ro: right open interval endpoint
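
abs_diff() above is self-contained enough to sanity-check outside the kernel; the (void)(&__a == &__b) line is what makes the compiler warn when the two arguments have different types. A standalone demo (the macro body is copied verbatim from the hunk above):

#include <assert.h>

#define abs_diff(a, b) ({               \
        typeof(a) __a = (a);            \
        typeof(b) __b = (b);            \
        (void)(&__a == &__b);           \
        __a > __b ? (__a - __b) : (__b - __a); \
})

int main(void)
{
        unsigned int big = 100, small = 30;

        /* Stays correct for unsigned types, where (small - big) would wrap. */
        assert(abs_diff(small, big) == 70);
        assert(abs_diff(big, small) == 70);
        /* abs_diff(small, 30L) would warn: comparison of distinct pointer types. */
        return 0;
}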
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f71ff9f0ec81..1c1072e3ca06 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -581,9 +581,7 @@ extern void *alloc_large_system_hash(const char *tablename,
unsigned long high_limit);
#define HASH_EARLY 0x00000001 /* Allocating during early boot? */
-#define HASH_SMALL 0x00000002 /* sub-page allocation allowed, min
- * shift passed via *_hash_shift */
-#define HASH_ZERO 0x00000004 /* Zero allocated hash table */
+#define HASH_ZERO 0x00000002 /* Zero allocated hash table */
/* Only NUMA needs hash distribution. 64bit NUMA architectures have
* sufficient vmalloc space.
@@ -596,13 +594,11 @@ extern int hashdist; /* Distribute hashes across NUMA nodes? */
#endif
#ifdef CONFIG_MEMTEST
-extern phys_addr_t early_memtest_bad_size; /* Size of faulty ram found by memtest */
-extern bool early_memtest_done; /* Was early memtest done? */
-extern void early_memtest(phys_addr_t start, phys_addr_t end);
+void early_memtest(phys_addr_t start, phys_addr_t end);
+void memtest_report_meminfo(struct seq_file *m);
#else
-static inline void early_memtest(phys_addr_t start, phys_addr_t end)
-{
-}
+static inline void early_memtest(phys_addr_t start, phys_addr_t end) { }
+static inline void memtest_report_meminfo(struct seq_file *m) { }
#endif
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index dbf26bc89dd4..ab94ad4597d0 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -61,7 +61,6 @@ struct mem_cgroup_reclaim_cookie {
#ifdef CONFIG_MEMCG
#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
struct mem_cgroup_id {
int id;
@@ -112,6 +111,9 @@ struct lruvec_stats {
/* Aggregated (CPU and subtree) state */
long state[NR_VM_NODE_STAT_ITEMS];
+ /* Non-hierarchical (CPU aggregated) state */
+ long state_local[NR_VM_NODE_STAT_ITEMS];
+
/* Pending child counts during tree propagation */
long state_pending[NR_VM_NODE_STAT_ITEMS];
};
@@ -588,7 +590,7 @@ static inline void mem_cgroup_protection(struct mem_cgroup *root,
/*
* There is no reclaim protection applied to a targeted reclaim.
* We are special casing this specific case here because
- * mem_cgroup_protected calculation is not robust enough to keep
+ * mem_cgroup_calculate_protection is not robust enough to keep
* the protection invariant for calculated effective values for
* parallel reclaimers with different reclaim target. This is
* especially a problem for tail memcgs (as they have pages on LRU)
@@ -866,8 +868,7 @@ static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec)
* parent_mem_cgroup - find the accounting parent of a memcg
* @memcg: memcg whose parent to find
*
- * Returns the parent memcg, or NULL if this is the root or the memory
- * controller is in legacy no-hierarchy mode.
+ * Returns the parent memcg, or NULL if this is the root.
*/
static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
{
@@ -1025,14 +1026,12 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
{
struct mem_cgroup_per_node *pn;
long x = 0;
- int cpu;
if (mem_cgroup_disabled())
return node_page_state(lruvec_pgdat(lruvec), idx);
pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- for_each_possible_cpu(cpu)
- x += per_cpu(pn->lruvec_stats_percpu->state[idx], cpu);
+ x = READ_ONCE(pn->lruvec_stats.state_local[idx]);
#ifdef CONFIG_SMP
if (x < 0)
x = 0;
@@ -1163,7 +1162,6 @@ unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
#else /* CONFIG_MEMCG */
#define MEM_CGROUP_ID_SHIFT 0
-#define MEM_CGROUP_ID_MAX 0
static inline struct mem_cgroup *folio_memcg(struct folio *folio)
{
@@ -1766,7 +1764,7 @@ int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
struct obj_cgroup *get_obj_cgroup_from_current(void);
-struct obj_cgroup *get_obj_cgroup_from_page(struct page *page);
+struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
@@ -1850,7 +1848,7 @@ static inline void __memcg_kmem_uncharge_page(struct page *page, int order)
{
}
-static inline struct obj_cgroup *get_obj_cgroup_from_page(struct page *page)
+static inline struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio)
{
return NULL;
}
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index fc9647b1b4f9..437441cdf78f 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -33,7 +33,7 @@ struct memory_dev_type {
#ifdef CONFIG_NUMA
extern bool numa_demotion_enabled;
struct memory_dev_type *alloc_memory_type(int adistance);
-void destroy_memory_type(struct memory_dev_type *memtype);
+void put_memory_type(struct memory_dev_type *memtype);
void init_node_memory_type(int node, struct memory_dev_type *default_type);
void clear_node_memory_type(int node, struct memory_dev_type *memtype);
#ifdef CONFIG_MIGRATION
@@ -68,7 +68,7 @@ static inline struct memory_dev_type *alloc_memory_type(int adistance)
return NULL;
}
-static inline void destroy_memory_type(struct memory_dev_type *memtype)
+static inline void put_memory_type(struct memory_dev_type *memtype)
{
}
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31343566c221..f53cfdaaaa41 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -77,11 +77,7 @@ struct memory_block {
*/
struct zone *zone;
struct device dev;
- /*
- * Number of vmemmap pages. These pages
- * lay at the beginning of the memory block.
- */
- unsigned long nr_vmemmap_pages;
+ struct vmem_altmap *altmap;
struct memory_group *group; /* group (if any) for this block */
struct list_head group_next; /* next block inside memory group */
#if defined(CONFIG_MEMORY_FAILURE) && defined(CONFIG_MEMORY_HOTPLUG)
@@ -147,7 +143,7 @@ static inline int hotplug_memory_notifier(notifier_fn_t fn, int pri)
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size,
- unsigned long vmemmap_pages,
+ struct vmem_altmap *altmap,
struct memory_group *group);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 013c69753c91..7d2076583494 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -97,6 +97,8 @@ typedef int __bitwise mhp_t;
* To do so, we will use the beginning of the hot-added range to build
* the page tables for the memmap array that describes the entire range.
* Only selected architectures support it with SPARSE_VMEMMAP.
+ * This is only a hint; the core kernel can decide not to do this based on
+ * different alignment checks.
*/
#define MHP_MEMMAP_ON_MEMORY ((__force mhp_t)BIT(1))
/*
@@ -354,7 +356,6 @@ extern struct zone *zone_for_pfn_range(int online_type, int nid,
extern int arch_create_linear_mapping(int nid, u64 start, u64 size,
struct mhp_params *params);
void arch_remove_linear_mapping(u64 start, u64 size);
-extern bool mhp_supports_memmap_on_memory(unsigned long size);
#endif /* CONFIG_MEMORY_HOTPLUG */
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h
index 473545a2c425..6fa21791fc85 100644
--- a/include/linux/mfd/88pm860x.h
+++ b/include/linux/mfd/88pm860x.h
@@ -472,13 +472,7 @@ extern int pm860x_bulk_read(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_bulk_write(struct i2c_client *, int, int, unsigned char *);
extern int pm860x_set_bits(struct i2c_client *, int, unsigned char,
unsigned char);
-extern int pm860x_page_reg_read(struct i2c_client *, int);
extern int pm860x_page_reg_write(struct i2c_client *, int, unsigned char);
extern int pm860x_page_bulk_read(struct i2c_client *, int, int,
unsigned char *);
-extern int pm860x_page_bulk_write(struct i2c_client *, int, int,
- unsigned char *);
-extern int pm860x_page_set_bits(struct i2c_client *, int, unsigned char,
- unsigned char);
-
#endif /* __LINUX_MFD_88PM860X_H */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 302a330c5c84..09fb3c56e7d7 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -382,10 +382,6 @@ struct ab8500_platform_data {
struct ab8500_sysctrl_platform_data *sysctrl;
};
-extern int ab8500_init(struct ab8500 *ab8500,
- enum ab8500_version version);
-extern int ab8500_exit(struct ab8500 *ab8500);
-
extern int ab8500_suspend(struct ab8500 *ab8500);
static inline int is_ab8500(struct ab8500 *ab)
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index e7a7e70fdb38..dd0fc891b228 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -556,16 +556,6 @@ static inline void prcmu_clear(unsigned int reg, u32 bits)
#define PRCMU_QOS_ARM_OPP 3
#define PRCMU_QOS_DEFAULT_VALUE -1
-static inline unsigned long prcmu_qos_get_cpufreq_opp_delay(void)
-{
- return 0;
-}
-
-static inline int prcmu_qos_requirement(int prcmu_qos_class)
-{
- return 0;
-}
-
static inline int prcmu_qos_add_requirement(int prcmu_qos_class,
char *name, s32 value)
{
@@ -582,15 +572,4 @@ static inline void prcmu_qos_remove_requirement(int prcmu_qos_class, char *name)
{
}
-static inline int prcmu_qos_add_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-static inline int prcmu_qos_remove_notifier(int prcmu_qos_class,
- struct notifier_block *notifier)
-{
- return 0;
-}
-
#endif /* __MACH_PRCMU_H */
diff --git a/include/linux/mfd/hi655x-pmic.h b/include/linux/mfd/hi655x-pmic.h
index 6a012784dd1b..194556851ccf 100644
--- a/include/linux/mfd/hi655x-pmic.h
+++ b/include/linux/mfd/hi655x-pmic.h
@@ -52,7 +52,6 @@
#define OTMP_D1R_INT_MASK BIT(OTMP_D1R_INT)
struct hi655x_pmic {
- struct resource *res;
struct device *dev;
struct regmap *regmap;
struct gpio_desc *gpio;
diff --git a/include/linux/mfd/max77686-private.h b/include/linux/mfd/max77686-private.h
index 3acceeedbaba..ea635d12a741 100644
--- a/include/linux/mfd/max77686-private.h
+++ b/include/linux/mfd/max77686-private.h
@@ -441,8 +441,4 @@ enum max77686_types {
TYPE_MAX77802,
};
-extern int max77686_irq_init(struct max77686_dev *max77686);
-extern void max77686_irq_exit(struct max77686_dev *max77686);
-extern int max77686_irq_resume(struct max77686_dev *max77686);
-
#endif /* __LINUX_MFD_MAX77686_PRIV_H */
diff --git a/include/linux/mfd/rz-mtu3.h b/include/linux/mfd/rz-mtu3.h
index c5173bc06270..8421d49500bf 100644
--- a/include/linux/mfd/rz-mtu3.h
+++ b/include/linux/mfd/rz-mtu3.h
@@ -151,7 +151,6 @@ struct rz_mtu3 {
void *priv_data;
};
-#if IS_ENABLED(CONFIG_RZ_MTU3)
static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
{
mutex_lock(&ch->lock);
@@ -188,70 +187,5 @@ void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val);
void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val);
void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch, u16 off,
u16 pos, u8 val);
-#else
-static inline bool rz_mtu3_request_channel(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_release_channel(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline bool rz_mtu3_is_enabled(struct rz_mtu3_channel *ch)
-{
- return false;
-}
-
-static inline void rz_mtu3_disable(struct rz_mtu3_channel *ch)
-{
-}
-
-static inline int rz_mtu3_enable(struct rz_mtu3_channel *ch)
-{
- return 0;
-}
-
-static inline u8 rz_mtu3_8bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_16bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u32 rz_mtu3_32bit_ch_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline u16 rz_mtu3_shared_reg_read(struct rz_mtu3_channel *ch, u16 off)
-{
- return 0;
-}
-
-static inline void rz_mtu3_8bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u8 val)
-{
-}
-
-static inline void rz_mtu3_16bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_32bit_ch_write(struct rz_mtu3_channel *ch, u16 off, u32 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_write(struct rz_mtu3_channel *ch, u16 off, u16 val)
-{
-}
-
-static inline void rz_mtu3_shared_reg_update_bit(struct rz_mtu3_channel *ch,
- u16 off, u16 pos, u8 val)
-{
-}
-#endif
#endif /* __MFD_RZ_MTU3_H__ */
diff --git a/include/linux/mhi.h b/include/linux/mhi.h
index f6de4b6ecfc7..039943ec4d4e 100644
--- a/include/linux/mhi.h
+++ b/include/linux/mhi.h
@@ -299,6 +299,10 @@ struct mhi_controller_config {
* @iova_start: IOMMU starting address for data (required)
* @iova_stop: IOMMU stop address for data (required)
* @fw_image: Firmware image name for normal booting (optional)
+ * @fw_data: Firmware image data content for normal booting, used only
+ * if fw_image is NULL and fbc_download is true (optional)
+ * @fw_sz: Firmware image data size for normal booting, used only if fw_image
+ * is NULL and fbc_download is true (optional)
* @edl_image: Firmware image name for emergency download mode (optional)
* @rddm_size: RAM dump size that host should allocate for debugging purpose
* @sbl_size: SBL image size downloaded through BHIe (optional)
@@ -384,6 +388,8 @@ struct mhi_controller {
dma_addr_t iova_start;
dma_addr_t iova_stop;
const char *fw_image;
+ const u8 *fw_data;
+ size_t fw_sz;
const char *edl_image;
size_t rddm_size;
size_t sbl_size;
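
Illustrative only (not from this patch): a sketch of how a controller driver might hand over an in-memory firmware blob with the new fields; example_set_fw_blob() is a hypothetical helper.

#include <linux/mhi.h>

/* Hypothetical helper: pass firmware from memory instead of by file name. */
static void example_set_fw_blob(struct mhi_controller *mhi_cntrl,
				const u8 *blob, size_t size)
{
	mhi_cntrl->fw_image = NULL;	/* fw_data is only used when fw_image is NULL */
	mhi_cntrl->fw_data = blob;
	mhi_cntrl->fw_sz = size;
	mhi_cntrl->fbc_download = true;	/* ...and only with FBC download enabled */
}
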
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 8bef1ab62bba..4e27ca7c49de 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -41,9 +41,10 @@
#define PHY_ID_KSZ9477 0x00221631
/* struct phy_device dev_flags definitions */
-#define MICREL_PHY_50MHZ_CLK 0x00000001
-#define MICREL_PHY_FXEN 0x00000002
-#define MICREL_KSZ8_P1_ERRATA 0x00000003
+#define MICREL_PHY_50MHZ_CLK BIT(0)
+#define MICREL_PHY_FXEN BIT(1)
+#define MICREL_KSZ8_P1_ERRATA BIT(2)
+#define MICREL_NO_EEE BIT(3)
#define MICREL_KSZ9021_EXTREG_CTRL 0xB
#define MICREL_KSZ9021_EXTREG_DATA_WRITE 0xC
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 396df1121bff..83aebc244cba 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -3,6 +3,7 @@
#define _LINUX_MINMAX_H
#include <linux/const.h>
+#include <linux/types.h>
/*
* min()/max()/clamp() macros must accomplish three things:
@@ -133,6 +134,70 @@
*/
#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
+/*
+ * Remove a const qualifier from integer types
+ * _Generic(foo, type-name: association, ..., default: association) performs a
+ * comparison against the foo type (not the qualified type).
+ * Do not use the const keyword in the type-name as it will not match the
+ * unqualified type of foo.
+ */
+#define __unconst_integer_type_cases(type) \
+ unsigned type: (unsigned type)0, \
+ signed type: (signed type)0
+
+#define __unconst_integer_typeof(x) typeof( \
+ _Generic((x), \
+ char: (char)0, \
+ __unconst_integer_type_cases(char), \
+ __unconst_integer_type_cases(short), \
+ __unconst_integer_type_cases(int), \
+ __unconst_integer_type_cases(long), \
+ __unconst_integer_type_cases(long long), \
+ default: (x)))
+
+/*
+ * Do not check the array parameter using __must_be_array().
+ * In the following legit use-case where the "array" passed is a simple pointer,
+ * __must_be_array() will return a failure.
+ * --- 8< ---
+ * int *buff
+ * ...
+ * min = min_array(buff, nb_items);
+ * --- 8< ---
+ *
+ * The first typeof(&(array)[0]) is needed in order to support arrays of both
+ * 'int *buff' and 'int buff[N]' types.
+ *
+ * The array can be an array of const items.
+ * typeof() keeps the const qualifier. Use __unconst_integer_typeof() in order
+ * to discard the const qualifier for the __element variable.
+ */
+#define __minmax_array(op, array, len) ({ \
+ typeof(&(array)[0]) __array = (array); \
+ typeof(len) __len = (len); \
+ __unconst_integer_typeof(__array[0]) __element = __array[--__len]; \
+ while (__len--) \
+ __element = op(__element, __array[__len]); \
+ __element; })
+
+/**
+ * min_array - return minimum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define min_array(array, len) __minmax_array(min, array, len)
+
+/**
+ * max_array - return maximum of values present in an array
+ * @array: array
+ * @len: array length
+ *
+ * Note that @len must not be zero (empty array).
+ */
+#define max_array(array, len) __minmax_array(max, array, len)
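
A minimal usage sketch (illustrative, not part of this patch); sample_range() is a hypothetical caller with a plain pointer, which is exactly the case __must_be_array() would have rejected.

#include <linux/errno.h>
#include <linux/minmax.h>
#include <linux/types.h>

/* Hypothetical helper: report the smallest and largest element of a buffer. */
static int sample_range(const int *samples, size_t nr, int *lo, int *hi)
{
	if (!nr)	/* min_array()/max_array() must not see an empty array */
		return -EINVAL;

	*lo = min_array(samples, nr);
	*hi = max_array(samples, nr);
	return 0;
}
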
+
/**
* clamp_t - return a value clamped to a given range using a given type
* @type: the type of variable to use
@@ -158,6 +223,32 @@
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
+static inline bool in_range64(u64 val, u64 start, u64 len)
+{
+ return (val - start) < len;
+}
+
+static inline bool in_range32(u32 val, u32 start, u32 len)
+{
+ return (val - start) < len;
+}
+
+/**
+ * in_range - Determine if a value lies within a range.
+ * @val: Value to test.
+ * @start: First value in range.
+ * @len: Number of values in range.
+ *
+ * This is more efficient than "if (start <= val && val < (start + len))".
+ * It also gives a different answer if @start + @len overflows the size of
+ * the type by a sufficient amount to encompass @val. Decide for yourself
+ * which behaviour you want, or prove that start + len never overflow.
+ * Do not blindly replace one form with the other.
+ */
+#define in_range(val, start, len) \
+ ((sizeof(start) | sizeof(len) | sizeof(val)) <= sizeof(u32) ? \
+ in_range32(val, start, len) : in_range64(val, start, len))
+
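
Illustrative values (not from the patch) showing the wrap-around difference the comment above warns about.

#include <linux/minmax.h>
#include <linux/types.h>

/* Hypothetical demo: start + len overflows u32, yet val sits inside the range. */
static bool demo_in_range(void)
{
	u32 start = 0xfffffff0, len = 0x20, val = 0x5;

	/*
	 * The open-coded "start <= val && val < start + len" fails here because
	 * start + len wraps to 0x10 and start <= val does not hold. in_range()
	 * computes (val - start) = 0x15, which is < len, so it returns true.
	 */
	return in_range(val, start, len);
}
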
/**
* swap - swap values of @a and @b
* @a: first value
diff --git a/include/linux/misc_cgroup.h b/include/linux/misc_cgroup.h
index c238207d1615..e799b1f8d05b 100644
--- a/include/linux/misc_cgroup.h
+++ b/include/linux/misc_cgroup.h
@@ -31,17 +31,18 @@ struct misc_cg;
* struct misc_res: Per cgroup per misc type resource
* @max: Maximum limit on the resource.
* @usage: Current usage of the resource.
- * @failed: True if charged failed for the resource in a cgroup.
+ * @events: Number of times the resource limit was exceeded.
*/
struct misc_res {
- unsigned long max;
- atomic_long_t usage;
- atomic_long_t events;
+ u64 max;
+ atomic64_t usage;
+ atomic64_t events;
};
/**
* struct misc_cg - Miscellaneous controller's cgroup structure.
* @css: cgroup subsys state object.
+ * @events_file: Handle for the misc resources events file.
* @res: Array of misc resources usage in the cgroup.
*/
struct misc_cg {
@@ -53,12 +54,10 @@ struct misc_cg {
struct misc_res res[MISC_CG_RES_TYPES];
};
-unsigned long misc_cg_res_total_usage(enum misc_res_type type);
-int misc_cg_set_capacity(enum misc_res_type type, unsigned long capacity);
-int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
-void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg,
- unsigned long amount);
+u64 misc_cg_res_total_usage(enum misc_res_type type);
+int misc_cg_set_capacity(enum misc_res_type type, u64 capacity);
+int misc_cg_try_charge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
+void misc_cg_uncharge(enum misc_res_type type, struct misc_cg *cg, u64 amount);
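
A hedged sketch (not part of this patch) of the charge/uncharge pairing with the widened u64 amounts; example_consume() is hypothetical.

#include <linux/misc_cgroup.h>

/* Hypothetical caller: charge a resource, use it, then release the charge. */
static int example_consume(enum misc_res_type type, struct misc_cg *cg, u64 amount)
{
	int ret = misc_cg_try_charge(type, cg, amount);

	if (ret)
		return ret;	/* over the limit */
	/* ... consume the resource ... */
	misc_cg_uncharge(type, cg, amount);
	return 0;
}
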
/**
* css_misc() - Get misc cgroup from the css.
@@ -99,27 +98,26 @@ static inline void put_misc_cg(struct misc_cg *cg)
#else /* !CONFIG_CGROUP_MISC */
-static inline unsigned long misc_cg_res_total_usage(enum misc_res_type type)
+static inline u64 misc_cg_res_total_usage(enum misc_res_type type)
{
return 0;
}
-static inline int misc_cg_set_capacity(enum misc_res_type type,
- unsigned long capacity)
+static inline int misc_cg_set_capacity(enum misc_res_type type, u64 capacity)
{
return 0;
}
static inline int misc_cg_try_charge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
return 0;
}
static inline void misc_cg_uncharge(enum misc_res_type type,
struct misc_cg *cg,
- unsigned long amount)
+ u64 amount)
{
}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 34f9dba17c1a..bf5d0b1b16f4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -319,11 +319,13 @@ extern unsigned int kobjsize(const void *objp);
#define VM_HIGH_ARCH_BIT_2 34 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_3 35 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_BIT_4 36 /* bit only usable on 64-bit architectures */
+#define VM_HIGH_ARCH_BIT_5 37 /* bit only usable on 64-bit architectures */
#define VM_HIGH_ARCH_0 BIT(VM_HIGH_ARCH_BIT_0)
#define VM_HIGH_ARCH_1 BIT(VM_HIGH_ARCH_BIT_1)
#define VM_HIGH_ARCH_2 BIT(VM_HIGH_ARCH_BIT_2)
#define VM_HIGH_ARCH_3 BIT(VM_HIGH_ARCH_BIT_3)
#define VM_HIGH_ARCH_4 BIT(VM_HIGH_ARCH_BIT_4)
+#define VM_HIGH_ARCH_5 BIT(VM_HIGH_ARCH_BIT_5)
#endif /* CONFIG_ARCH_USES_HIGH_VMA_FLAGS */
#ifdef CONFIG_ARCH_HAS_PKEYS
@@ -339,6 +341,21 @@ extern unsigned int kobjsize(const void *objp);
#endif
#endif /* CONFIG_ARCH_HAS_PKEYS */
+#ifdef CONFIG_X86_USER_SHADOW_STACK
+/*
+ * VM_SHADOW_STACK should not be set with VM_SHARED because core mm does
+ * not support it.
+ *
+ * These VMAs will get a single end guard page. This helps userspace protect
+ * itself from attacks. A single page is enough for current shadow stack archs
+ * (x86). See the comments near alloc_shstk() in arch/x86/kernel/shstk.c
+ * for more details on the guard size.
+ */
+# define VM_SHADOW_STACK VM_HIGH_ARCH_5
+#else
+# define VM_SHADOW_STACK VM_NONE
+#endif
+
#if defined(CONFIG_X86)
# define VM_PAT VM_ARCH_1 /* PAT reserves whole VMA at once (x86) */
#elif defined(CONFIG_PPC)
@@ -370,7 +387,7 @@ extern unsigned int kobjsize(const void *objp);
#endif
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
-# define VM_UFFD_MINOR_BIT 37
+# define VM_UFFD_MINOR_BIT 38
# define VM_UFFD_MINOR BIT(VM_UFFD_MINOR_BIT) /* UFFD minor faults */
#else /* !CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
# define VM_UFFD_MINOR VM_NONE
@@ -397,6 +414,8 @@ extern unsigned int kobjsize(const void *objp);
#define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
#endif
+#define VM_STARTGAP_FLAGS (VM_GROWSDOWN | VM_SHADOW_STACK)
+
#ifdef CONFIG_STACK_GROWSUP
#define VM_STACK VM_GROWSUP
#define VM_STACK_EARLY VM_GROWSDOWN
@@ -532,13 +551,6 @@ struct vm_fault {
*/
};
-/* page entry size for vm->huge_fault() */
-enum page_entry_size {
- PE_SIZE_PTE = 0,
- PE_SIZE_PMD,
- PE_SIZE_PUD,
-};
-
/*
* These are the virtual MM functions - opening of an area, closing and
* unmapping it (needed to keep files on disk up-to-date etc), pointer
@@ -562,8 +574,7 @@ struct vm_operations_struct {
int (*mprotect)(struct vm_area_struct *vma, unsigned long start,
unsigned long end, unsigned long newflags);
vm_fault_t (*fault)(struct vm_fault *vmf);
- vm_fault_t (*huge_fault)(struct vm_fault *vmf,
- enum page_entry_size pe_size);
+ vm_fault_t (*huge_fault)(struct vm_fault *vmf, unsigned int order);
vm_fault_t (*map_pages)(struct vm_fault *vmf,
pgoff_t start_pgoff, pgoff_t end_pgoff);
unsigned long (*pagesize)(struct vm_area_struct * area);
@@ -679,6 +690,7 @@ static inline void vma_end_read(struct vm_area_struct *vma)
rcu_read_unlock();
}
+/* WARNING! Can only be used if mmap_lock is expected to be write-locked */
static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
{
mmap_assert_write_locked(vma->vm_mm);
@@ -691,6 +703,11 @@ static bool __is_vma_write_locked(struct vm_area_struct *vma, int *mm_lock_seq)
return (vma->vm_lock_seq == *mm_lock_seq);
}
+/*
+ * Begin writing to a VMA.
+ * Exclude concurrent readers under the per-VMA lock until the currently
+ * write-locked mmap_lock is dropped or downgraded.
+ */
static inline void vma_start_write(struct vm_area_struct *vma)
{
int mm_lock_seq;
@@ -709,26 +726,17 @@ static inline void vma_start_write(struct vm_area_struct *vma)
up_write(&vma->vm_lock->lock);
}
-static inline bool vma_try_start_write(struct vm_area_struct *vma)
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
{
int mm_lock_seq;
- if (__is_vma_write_locked(vma, &mm_lock_seq))
- return true;
-
- if (!down_write_trylock(&vma->vm_lock->lock))
- return false;
-
- WRITE_ONCE(vma->vm_lock_seq, mm_lock_seq);
- up_write(&vma->vm_lock->lock);
- return true;
+ VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
}
-static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+static inline void vma_assert_locked(struct vm_area_struct *vma)
{
- int mm_lock_seq;
-
- VM_BUG_ON_VMA(!__is_vma_write_locked(vma, &mm_lock_seq), vma);
+ if (!rwsem_is_locked(&vma->vm_lock->lock))
+ vma_assert_write_locked(vma);
}
static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
@@ -739,6 +747,22 @@ static inline void vma_mark_detached(struct vm_area_struct *vma, bool detached)
vma->detached = detached;
}
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_end_read(vmf->vma);
+ else
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+ if (vmf->flags & FAULT_FLAG_VMA_LOCK)
+ vma_assert_locked(vmf->vma);
+ else
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+
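
A minimal sketch (not from the patch) of how a fault path might use these helpers to drop whichever lock it is holding; example_fault_bail() is hypothetical.

#include <linux/mm.h>

/* Hypothetical bail-out path: verify and then drop the fault-time lock. */
static vm_fault_t example_fault_bail(struct vm_fault *vmf)
{
	assert_fault_locked(vmf);	/* per-VMA lock or mmap_lock, depending on flags */
	release_fault_lock(vmf);	/* drops whichever of the two is held */
	return VM_FAULT_RETRY;
}
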
struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
unsigned long address);
@@ -748,25 +772,40 @@ static inline bool vma_start_read(struct vm_area_struct *vma)
{ return false; }
static inline void vma_end_read(struct vm_area_struct *vma) {}
static inline void vma_start_write(struct vm_area_struct *vma) {}
-static inline bool vma_try_start_write(struct vm_area_struct *vma)
- { return true; }
-static inline void vma_assert_write_locked(struct vm_area_struct *vma) {}
+static inline void vma_assert_write_locked(struct vm_area_struct *vma)
+ { mmap_assert_write_locked(vma->vm_mm); }
static inline void vma_mark_detached(struct vm_area_struct *vma,
bool detached) {}
+static inline struct vm_area_struct *lock_vma_under_rcu(struct mm_struct *mm,
+ unsigned long address)
+{
+ return NULL;
+}
+
+static inline void release_fault_lock(struct vm_fault *vmf)
+{
+ mmap_read_unlock(vmf->vma->vm_mm);
+}
+
+static inline void assert_fault_locked(struct vm_fault *vmf)
+{
+ mmap_assert_locked(vmf->vma->vm_mm);
+}
+
#endif /* CONFIG_PER_VMA_LOCK */
+extern const struct vm_operations_struct vma_dummy_vm_ops;
+
/*
* WARNING: vma_init does not initialize vma->vm_lock.
* Use vm_area_alloc()/vm_area_free() if vma needs locking.
*/
static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
{
- static const struct vm_operations_struct dummy_vm_ops = {};
-
memset(vma, 0, sizeof(*vma));
vma->vm_mm = mm;
- vma->vm_ops = &dummy_vm_ops;
+ vma->vm_ops = &vma_dummy_vm_ops;
INIT_LIST_HEAD(&vma->anon_vma_chain);
vma_mark_detached(vma, false);
vma_numab_state_init(vma);
@@ -779,18 +818,22 @@ static inline void vm_flags_init(struct vm_area_struct *vma,
ACCESS_PRIVATE(vma, __vm_flags) = flags;
}
-/* Use when VMA is part of the VMA tree and modifications need coordination */
+/*
+ * Use when VMA is part of the VMA tree and modifications need coordination
+ * Note: vm_flags_reset and vm_flags_reset_once do not lock the vma and
+ * it should be locked explicitly beforehand.
+ */
static inline void vm_flags_reset(struct vm_area_struct *vma,
vm_flags_t flags)
{
- vma_start_write(vma);
+ vma_assert_write_locked(vma);
vm_flags_init(vma, flags);
}
static inline void vm_flags_reset_once(struct vm_area_struct *vma,
vm_flags_t flags)
{
- vma_start_write(vma);
+ vma_assert_write_locked(vma);
WRITE_ONCE(ACCESS_PRIVATE(vma, __vm_flags), flags);
}
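
Illustrative caller ordering (not part of the patch): after this change the VMA has to be write-locked explicitly before its flags are reset; example_clear_vm_flag() is hypothetical.

#include <linux/mm.h>

/* Hypothetical helper; the caller must already hold mmap_lock for writing. */
static void example_clear_vm_flag(struct vm_area_struct *vma, vm_flags_t bit)
{
	vma_start_write(vma);			/* take the per-VMA write lock */
	vm_flags_reset(vma, vma->vm_flags & ~bit); /* now only asserts, no locking */
}
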
@@ -839,6 +882,31 @@ static inline bool vma_is_anonymous(struct vm_area_struct *vma)
return !vma->vm_ops;
}
+/*
+ * Indicate if the VMA is a heap for the given task; for
+ * /proc/PID/maps that is the heap of the main task.
+ */
+static inline bool vma_is_initial_heap(const struct vm_area_struct *vma)
+{
+ return vma->vm_start <= vma->vm_mm->brk &&
+ vma->vm_end >= vma->vm_mm->start_brk;
+}
+
+/*
+ * Indicate if the VMA is a stack for the given task; for
+ * /proc/PID/maps that is the stack of the main task.
+ */
+static inline bool vma_is_initial_stack(const struct vm_area_struct *vma)
+{
+ /*
+ * We make no effort to guess what a given thread considers to be
+ * its "stack". It's not even well-defined for programs written
+	 * its "stack". It's not even well-defined for programs written in
+	 * languages like Go.
+ return vma->vm_start <= vma->vm_mm->start_stack &&
+ vma->vm_end >= vma->vm_mm->start_stack;
+}
+
static inline bool vma_is_temporary_stack(struct vm_area_struct *vma)
{
int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
@@ -976,7 +1044,7 @@ struct inode;
* compound_order() can be called without holding a reference, which means
* that niceties like page_folio() don't work. These callers should be
* prepared to handle wild return values. For example, PG_head may be
- * set before _folio_order is initialised, or this may be a tail page.
+ * set before the order is initialised, or this may be a tail page.
* See compaction.c for some good examples.
*/
static inline unsigned int compound_order(struct page *page)
@@ -985,7 +1053,7 @@ static inline unsigned int compound_order(struct page *page)
if (!test_bit(PG_head, &folio->flags))
return 0;
- return folio->_folio_order;
+ return folio->_flags_1 & 0xff;
}
/**
@@ -1001,7 +1069,7 @@ static inline unsigned int folio_order(struct folio *folio)
{
if (!folio_test_large(folio))
return 0;
- return folio->_folio_order;
+ return folio->_flags_1 & 0xff;
}
#include <linux/huge_mm.h>
@@ -1072,11 +1140,6 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-
-#ifndef is_ioremap_addr
-#define is_ioremap_addr(x) is_vmalloc_addr(x)
-#endif
-
#ifdef CONFIG_MMU
extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
@@ -1220,33 +1283,6 @@ void folio_copy(struct folio *dst, struct folio *src);
unsigned long nr_free_buffer_pages(void);
-/*
- * Compound pages have a destructor function. Provide a
- * prototype for that function and accessor functions.
- * These are _only_ valid on the head of a compound page.
- */
-typedef void compound_page_dtor(struct page *);
-
-/* Keep the enum in sync with compound_page_dtors array in mm/page_alloc.c */
-enum compound_dtor_id {
- NULL_COMPOUND_DTOR,
- COMPOUND_PAGE_DTOR,
-#ifdef CONFIG_HUGETLB_PAGE
- HUGETLB_PAGE_DTOR,
-#endif
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- TRANSHUGE_PAGE_DTOR,
-#endif
- NR_COMPOUND_DTORS,
-};
-
-static inline void folio_set_compound_dtor(struct folio *folio,
- enum compound_dtor_id compound_dtor)
-{
- VM_BUG_ON_FOLIO(compound_dtor >= NR_COMPOUND_DTORS, folio);
- folio->_folio_dtor = compound_dtor;
-}
-
void destroy_large_folio(struct folio *folio);
/* Returns the number of bytes in this potentially compound page. */
@@ -1282,8 +1318,6 @@ static inline unsigned long thp_size(struct page *page)
return PAGE_SIZE << thp_order(page);
}
-void free_compound_page(struct page *page);
-
#ifdef CONFIG_MMU
/*
* Do pte_mkwrite, but only if the vma says VM_WRITE. We do this when
@@ -1294,12 +1328,13 @@ void free_compound_page(struct page *page);
static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
- pte = pte_mkwrite(pte);
+ pte = pte_mkwrite(pte, vma);
return pte;
}
vm_fault_t do_set_pmd(struct vm_fault *vmf, struct page *page);
-void do_set_pte(struct vm_fault *vmf, struct page *page, unsigned long addr);
+void set_pte_range(struct vm_fault *vmf, struct folio *folio,
+ struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
@@ -2006,7 +2041,7 @@ static inline long folio_nr_pages(struct folio *folio)
#ifdef CONFIG_64BIT
return folio->_folio_nr_pages;
#else
- return 1L << folio->_folio_order;
+ return 1L << (folio->_flags_1 & 0xff);
#endif
}
@@ -2024,7 +2059,7 @@ static inline unsigned long compound_nr(struct page *page)
#ifdef CONFIG_64BIT
return folio->_folio_nr_pages;
#else
- return 1L << folio->_folio_order;
+ return 1L << (folio->_flags_1 & 0xff);
#endif
}
@@ -2170,7 +2205,6 @@ static inline void *folio_address(const struct folio *folio)
return page_address(&folio->page);
}
-extern void *page_rmapping(struct page *page);
extern pgoff_t __page_file_index(struct page *page);
/*
@@ -2238,18 +2272,6 @@ extern void pagefault_out_of_memory(void);
#define offset_in_folio(folio, p) ((unsigned long)(p) & (folio_size(folio) - 1))
/*
- * Flags passed to show_mem() and show_free_areas() to suppress output in
- * various contexts.
- */
-#define SHOW_MEM_FILTER_NODES (0x0001u) /* disallowed nodes */
-
-extern void __show_free_areas(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
-static void __maybe_unused show_free_areas(unsigned int flags, nodemask_t *nodemask)
-{
- __show_free_areas(flags, nodemask, MAX_NR_ZONES - 1);
-}
-
-/*
* Parameter block passed down to zap_pte_range in exceptional cases.
*/
struct zap_details {
@@ -2317,9 +2339,9 @@ static inline void zap_vma_pages(struct vm_area_struct *vma)
zap_page_range_single(vma, vma->vm_start,
vma->vm_end - vma->vm_start, NULL);
}
-void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
+void unmap_vmas(struct mmu_gather *tlb, struct ma_state *mas,
struct vm_area_struct *start_vma, unsigned long start,
- unsigned long end, bool mm_wr_locked);
+ unsigned long end, unsigned long tree_end, bool mm_wr_locked);
struct mmu_notifier_range;
@@ -2766,42 +2788,93 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
}
#endif /* CONFIG_MMU */
+static inline struct ptdesc *virt_to_ptdesc(const void *x)
+{
+ return page_ptdesc(virt_to_page(x));
+}
+
+static inline void *ptdesc_to_virt(const struct ptdesc *pt)
+{
+ return page_to_virt(ptdesc_page(pt));
+}
+
+static inline void *ptdesc_address(const struct ptdesc *pt)
+{
+ return folio_address(ptdesc_folio(pt));
+}
+
+static inline bool pagetable_is_reserved(struct ptdesc *pt)
+{
+ return folio_test_reserved(ptdesc_folio(pt));
+}
+
+/**
+ * pagetable_alloc - Allocate pagetables
+ * @gfp: GFP flags
+ * @order: desired pagetable order
+ *
+ * pagetable_alloc allocates memory for page tables as well as a page table
+ * descriptor to describe that memory.
+ *
+ * Return: The ptdesc describing the allocated page tables.
+ */
+static inline struct ptdesc *pagetable_alloc(gfp_t gfp, unsigned int order)
+{
+ struct page *page = alloc_pages(gfp | __GFP_COMP, order);
+
+ return page_ptdesc(page);
+}
+
+/**
+ * pagetable_free - Free pagetables
+ * @pt: The page table descriptor
+ *
+ * pagetable_free frees the memory of all page tables described by a page
+ * table descriptor and the memory for the descriptor itself.
+ */
+static inline void pagetable_free(struct ptdesc *pt)
+{
+ struct page *page = ptdesc_page(pt);
+
+ __free_pages(page, compound_order(page));
+}
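
A minimal allocation round trip (illustrative, not from the patch); example_pt_alloc() and example_pt_free() are hypothetical.

#include <linux/gfp.h>
#include <linux/mm.h>

/* Hypothetical: allocate one zeroed page of page-table memory and free it. */
static struct ptdesc *example_pt_alloc(void)
{
	struct ptdesc *pt = pagetable_alloc(GFP_KERNEL | __GFP_ZERO, 0);

	if (!pt)
		return NULL;
	/* ... fill in entries via ptdesc_address(pt) ... */
	return pt;
}

static void example_pt_free(struct ptdesc *pt)
{
	pagetable_free(pt);
}
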
+
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
void __init ptlock_cache_init(void);
-extern bool ptlock_alloc(struct page *page);
-extern void ptlock_free(struct page *page);
+bool ptlock_alloc(struct ptdesc *ptdesc);
+void ptlock_free(struct ptdesc *ptdesc);
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return page->ptl;
+ return ptdesc->ptl;
}
#else /* ALLOC_SPLIT_PTLOCKS */
static inline void ptlock_cache_init(void)
{
}
-static inline bool ptlock_alloc(struct page *page)
+static inline bool ptlock_alloc(struct ptdesc *ptdesc)
{
return true;
}
-static inline void ptlock_free(struct page *page)
+static inline void ptlock_free(struct ptdesc *ptdesc)
{
}
-static inline spinlock_t *ptlock_ptr(struct page *page)
+static inline spinlock_t *ptlock_ptr(struct ptdesc *ptdesc)
{
- return &page->ptl;
+ return &ptdesc->ptl;
}
#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_page(*pmd));
+ return ptlock_ptr(page_ptdesc(pmd_page(*pmd)));
}
-static inline bool ptlock_init(struct page *page)
+static inline bool ptlock_init(struct ptdesc *ptdesc)
{
/*
* prep_new_page() initialize page->private (and therefore page->ptl)
@@ -2810,10 +2883,10 @@ static inline bool ptlock_init(struct page *page)
* It can happen if arch try to use slab for page table allocation:
* slab code uses page->slab_cache, which share storage with page->ptl.
*/
- VM_BUG_ON_PAGE(*(unsigned long *)&page->ptl, page);
- if (!ptlock_alloc(page))
+ VM_BUG_ON_PAGE(*(unsigned long *)&ptdesc->ptl, ptdesc_page(ptdesc));
+ if (!ptlock_alloc(ptdesc))
return false;
- spin_lock_init(ptlock_ptr(page));
+ spin_lock_init(ptlock_ptr(ptdesc));
return true;
}
@@ -2826,24 +2899,28 @@ static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
static inline void ptlock_cache_init(void) {}
-static inline bool ptlock_init(struct page *page) { return true; }
-static inline void ptlock_free(struct page *page) {}
+static inline bool ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void ptlock_free(struct ptdesc *ptdesc) {}
#endif /* USE_SPLIT_PTE_PTLOCKS */
-static inline bool pgtable_pte_page_ctor(struct page *page)
+static inline bool pagetable_pte_ctor(struct ptdesc *ptdesc)
{
- if (!ptlock_init(page))
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ if (!ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_lruvec_page_state(page, NR_PAGETABLE);
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
-static inline void pgtable_pte_page_dtor(struct page *page)
+static inline void pagetable_pte_dtor(struct ptdesc *ptdesc)
{
- ptlock_free(page);
- __ClearPageTable(page);
- dec_lruvec_page_state(page, NR_PAGETABLE);
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
pte_t *__pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp);
@@ -2892,28 +2969,33 @@ static inline struct page *pmd_pgtable_page(pmd_t *pmd)
return virt_to_page((void *)((unsigned long) pmd & mask));
}
+static inline struct ptdesc *pmd_ptdesc(pmd_t *pmd)
+{
+ return page_ptdesc(pmd_pgtable_page(pmd));
+}
+
static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
- return ptlock_ptr(pmd_pgtable_page(pmd));
+ return ptlock_ptr(pmd_ptdesc(pmd));
}
-static inline bool pmd_ptlock_init(struct page *page)
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- page->pmd_huge_pte = NULL;
+ ptdesc->pmd_huge_pte = NULL;
#endif
- return ptlock_init(page);
+ return ptlock_init(ptdesc);
}
-static inline void pmd_ptlock_free(struct page *page)
+static inline void pmd_ptlock_free(struct ptdesc *ptdesc)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- VM_BUG_ON_PAGE(page->pmd_huge_pte, page);
+ VM_BUG_ON_PAGE(ptdesc->pmd_huge_pte, ptdesc_page(ptdesc));
#endif
- ptlock_free(page);
+ ptlock_free(ptdesc);
}
-#define pmd_huge_pte(mm, pmd) (pmd_pgtable_page(pmd)->pmd_huge_pte)
+#define pmd_huge_pte(mm, pmd) (pmd_ptdesc(pmd)->pmd_huge_pte)
#else
@@ -2922,8 +3004,8 @@ static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
return &mm->page_table_lock;
}
-static inline bool pmd_ptlock_init(struct page *page) { return true; }
-static inline void pmd_ptlock_free(struct page *page) {}
+static inline bool pmd_ptlock_init(struct ptdesc *ptdesc) { return true; }
+static inline void pmd_ptlock_free(struct ptdesc *ptdesc) {}
#define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
@@ -2936,20 +3018,24 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
return ptl;
}
-static inline bool pgtable_pmd_page_ctor(struct page *page)
+static inline bool pagetable_pmd_ctor(struct ptdesc *ptdesc)
{
- if (!pmd_ptlock_init(page))
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ if (!pmd_ptlock_init(ptdesc))
return false;
- __SetPageTable(page);
- inc_lruvec_page_state(page, NR_PAGETABLE);
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
return true;
}
-static inline void pgtable_pmd_page_dtor(struct page *page)
+static inline void pagetable_pmd_dtor(struct ptdesc *ptdesc)
{
- pmd_ptlock_free(page);
- __ClearPageTable(page);
- dec_lruvec_page_state(page, NR_PAGETABLE);
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ pmd_ptlock_free(ptdesc);
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
}
/*
@@ -3004,6 +3090,11 @@ static inline void mark_page_reserved(struct page *page)
adjust_managed_page_count(page, -1);
}
+static inline void free_reserved_ptdesc(struct ptdesc *pt)
+{
+ free_reserved_page(ptdesc_page(pt));
+}
+
/*
* Default method to free all the __init memory into the buddy system.
* The freed pages will be poisoned with pattern "poison" if it's within
@@ -3069,9 +3160,9 @@ extern void mem_init(void);
extern void __init mmap_init(void);
extern void __show_mem(unsigned int flags, nodemask_t *nodemask, int max_zone_idx);
-static inline void show_mem(unsigned int flags, nodemask_t *nodemask)
+static inline void show_mem(void)
{
- __show_mem(flags, nodemask, MAX_NR_ZONES - 1);
+ __show_mem(0, NULL, MAX_NR_ZONES - 1);
}
extern long si_mem_available(void);
extern void si_meminfo(struct sysinfo * val);
@@ -3193,7 +3284,8 @@ extern unsigned long mmap_region(struct file *file, unsigned long addr,
struct list_head *uf);
extern unsigned long do_mmap(struct file *file, unsigned long addr,
unsigned long len, unsigned long prot, unsigned long flags,
- unsigned long pgoff, unsigned long *populate, struct list_head *uf);
+ vm_flags_t vm_flags, unsigned long pgoff, unsigned long *populate,
+ struct list_head *uf);
extern int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
unsigned long start, size_t len, struct list_head *uf,
bool unlock);
@@ -3281,15 +3373,26 @@ struct vm_area_struct *vma_lookup(struct mm_struct *mm, unsigned long addr)
return mtree_load(&mm->mm_mt, addr);
}
+static inline unsigned long stack_guard_start_gap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_GROWSDOWN)
+ return stack_guard_gap;
+
+ /* See reasoning around the VM_SHADOW_STACK definition */
+ if (vma->vm_flags & VM_SHADOW_STACK)
+ return PAGE_SIZE;
+
+ return 0;
+}
+
static inline unsigned long vm_start_gap(struct vm_area_struct *vma)
{
+ unsigned long gap = stack_guard_start_gap(vma);
unsigned long vm_start = vma->vm_start;
- if (vma->vm_flags & VM_GROWSDOWN) {
- vm_start -= stack_guard_gap;
- if (vm_start > vma->vm_start)
- vm_start = 0;
- }
+ vm_start -= gap;
+ if (vm_start > vma->vm_start)
+ vm_start = 0;
return vm_start;
}
@@ -3403,6 +3506,24 @@ static inline vm_fault_t vmf_error(int err)
return VM_FAULT_SIGBUS;
}
+/*
+ * Convert errno to return value for ->page_mkwrite() calls.
+ *
+ * This should eventually be merged with vmf_error() above, but will need a
+ * careful audit of all vmf_error() callers.
+ */
+static inline vm_fault_t vmf_fs_error(int err)
+{
+ if (err == 0)
+ return VM_FAULT_LOCKED;
+ if (err == -EFAULT || err == -EAGAIN)
+ return VM_FAULT_NOPAGE;
+ if (err == -ENOMEM)
+ return VM_FAULT_OOM;
+ /* -ENOSPC, -EDQUOT, -EIO ... */
+ return VM_FAULT_SIGBUS;
+}
+
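
A hedged sketch (not in the patch) of a ->page_mkwrite-style handler using vmf_fs_error(); example_page_mkwrite() and fs_prepare_write() are hypothetical.

#include <linux/mm.h>
#include <linux/pagemap.h>

/* Hypothetical handler: map a filesystem errno onto a fault return code. */
static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
	struct folio *folio = page_folio(vmf->page);
	int err;

	folio_lock(folio);
	err = fs_prepare_write(folio);	/* assumed filesystem-specific step */
	if (err) {
		folio_unlock(folio);
		return vmf_fs_error(err);
	}
	return VM_FAULT_LOCKED;		/* what vmf_fs_error(0) would return */
}
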
struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
unsigned int foll_flags);
@@ -3509,8 +3630,8 @@ static inline bool debug_pagealloc_enabled(void)
}
/*
- * For use in fast paths after init_debug_pagealloc() has run, or when a
- * false negative result is not harmful when called too early.
+ * For use in fast paths after mem_debugging_and_hardening_init() has run,
+ * or when a false negative result is not harmful when called too early.
*/
static inline bool debug_pagealloc_enabled_static(void)
{
@@ -3665,13 +3786,32 @@ void vmemmap_free(unsigned long start, unsigned long end,
struct vmem_altmap *altmap);
#endif
-#ifdef CONFIG_ARCH_WANT_OPTIMIZE_VMEMMAP
-static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
- struct dev_pagemap *pgmap)
+#define VMEMMAP_RESERVE_NR 2
+#ifdef CONFIG_ARCH_WANT_OPTIMIZE_DAX_VMEMMAP
+static inline bool __vmemmap_can_optimize(struct vmem_altmap *altmap,
+ struct dev_pagemap *pgmap)
{
- return is_power_of_2(sizeof(struct page)) &&
- pgmap && (pgmap_vmemmap_nr(pgmap) > 1) && !altmap;
+ unsigned long nr_pages;
+ unsigned long nr_vmemmap_pages;
+
+ if (!pgmap || !is_power_of_2(sizeof(struct page)))
+ return false;
+
+ nr_pages = pgmap_vmemmap_nr(pgmap);
+ nr_vmemmap_pages = ((nr_pages * sizeof(struct page)) >> PAGE_SHIFT);
+ /*
+ * For vmemmap optimization with DAX we need minimum 2 vmemmap
+ * pages. See layout diagram in Documentation/mm/vmemmap_dedup.rst
+ */
+ return !altmap && (nr_vmemmap_pages > VMEMMAP_RESERVE_NR);
}
+/*
+ * If we don't have an architecture override, use the generic rule
+ */
+#ifndef vmemmap_can_optimize
+#define vmemmap_can_optimize __vmemmap_can_optimize
+#endif
+
#else
static inline bool vmemmap_can_optimize(struct vmem_altmap *altmap,
struct dev_pagemap *pgmap)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 21d6c72bcc71..8148b30a9df1 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -523,6 +523,27 @@ static inline bool mm_tlb_flush_nested(struct mm_struct *mm)
return atomic_read(&mm->tlb_flush_pending) > 1;
}
+#ifdef CONFIG_MMU
+/*
+ * Computes the pte marker to copy from the given source entry into dst_vma.
+ * If no marker should be copied, returns 0.
+ * The caller should insert a new pte created with make_pte_marker().
+ */
+static inline pte_marker copy_pte_marker(
+ swp_entry_t entry, struct vm_area_struct *dst_vma)
+{
+ pte_marker srcm = pte_marker_get(entry);
+ /* Always copy error entries. */
+ pte_marker dstm = srcm & PTE_MARKER_POISONED;
+
+ /* Only copy PTE markers if UFFD register matches. */
+ if ((srcm & PTE_MARKER_UFFD_WP) && userfaultfd_wp(dst_vma))
+ dstm |= PTE_MARKER_UFFD_WP;
+
+ return dstm;
+}
+#endif
+
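
Illustrative only (not from this patch), assuming the caller has already decoded a marker swap entry; example_fill_marker() is hypothetical.

#include <linux/mm_inline.h>
#include <linux/swapops.h>

/* Hypothetical: build the destination pte for a copied marker, if any. */
static bool example_fill_marker(swp_entry_t entry,
				struct vm_area_struct *dst_vma, pte_t *dstp)
{
	pte_marker m = copy_pte_marker(entry, dst_vma);

	if (!m)
		return false;	/* caller leaves the destination pte as none */
	*dstp = make_pte_marker(m);
	return true;
}
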
/*
* If this pte is wr-protected by uffd-wp in any form, arm the special pte to
* replace a none pte. NOTE! This should only be called when *pte is already
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 7d30dc4ff0ff..36c5b43999e6 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -141,20 +141,6 @@ struct page {
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
};
- struct { /* Page table pages */
- unsigned long _pt_pad_1; /* compound_head */
- pgtable_t pmd_huge_pte; /* protected by page->ptl */
- unsigned long _pt_pad_2; /* mapping */
- union {
- struct mm_struct *pt_mm; /* x86 pgds only */
- atomic_t pt_frag_refcount; /* powerpc */
- };
-#if ALLOC_SPLIT_PTLOCKS
- spinlock_t *ptl;
-#else
- spinlock_t ptl;
-#endif
- };
struct { /* ZONE_DEVICE pages */
/** @pgmap: Points to the hosting device page map. */
struct dev_pagemap *pgmap;
@@ -262,6 +248,14 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
return (struct page *)(~ENCODE_PAGE_BITS & (unsigned long)page);
}
+/*
+ * A swap entry has to fit into a "unsigned long", as the entry is hidden
+ * in the "index" field of the swapper address space.
+ */
+typedef struct {
+ unsigned long val;
+} swp_entry_t;
+
/**
* struct folio - Represents a contiguous set of bytes.
* @flags: Identical to the page flags.
@@ -272,14 +266,12 @@ static inline struct page *encoded_page_ptr(struct encoded_page *page)
* @index: Offset within the file, in units of pages. For anonymous memory,
* this is the index from the beginning of the mmap.
* @private: Filesystem per-folio data (see folio_attach_private()).
- * Used for swp_entry_t if folio_test_swapcache().
+ * @swap: Used for swp_entry_t if folio_test_swapcache().
* @_mapcount: Do not access this member directly. Use folio_mapcount() to
* find out how many times this folio is mapped by userspace.
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
- * @_folio_dtor: Which destructor to use for this folio.
- * @_folio_order: Do not use directly, call folio_order().
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
* @_nr_pages_mapped: Do not use directly, call folio_mapcount().
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
@@ -317,7 +309,10 @@ struct folio {
};
struct address_space *mapping;
pgoff_t index;
- void *private;
+ union {
+ void *private;
+ swp_entry_t swap;
+ };
atomic_t _mapcount;
atomic_t _refcount;
#ifdef CONFIG_MEMCG
@@ -331,9 +326,8 @@ struct folio {
struct {
unsigned long _flags_1;
unsigned long _head_1;
+ unsigned long _folio_avail;
/* public: */
- unsigned char _folio_dtor;
- unsigned char _folio_order;
atomic_t _entire_mapcount;
atomic_t _nr_pages_mapped;
atomic_t _pincount;
@@ -391,8 +385,89 @@ FOLIO_MATCH(compound_head, _head_1);
offsetof(struct page, pg) + 2 * sizeof(struct page))
FOLIO_MATCH(flags, _flags_2);
FOLIO_MATCH(compound_head, _head_2);
+FOLIO_MATCH(flags, _flags_2a);
+FOLIO_MATCH(compound_head, _head_2a);
#undef FOLIO_MATCH
+/**
+ * struct ptdesc - Memory descriptor for page tables.
+ * @__page_flags: Same as page flags. Unused for page tables.
+ * @pt_rcu_head: For freeing page table pages.
+ * @pt_list: List of used page tables. Used for s390 and x86.
+ * @_pt_pad_1: Padding that aliases with page's compound head.
+ * @pmd_huge_pte: Protected by ptdesc->ptl, used for THPs.
+ * @__page_mapping: Aliases with page->mapping. Unused for page tables.
+ * @pt_mm: Used for x86 pgds.
+ * @pt_frag_refcount: For fragmented page table tracking. Powerpc and s390 only.
+ * @_pt_pad_2: Padding to ensure proper alignment.
+ * @ptl: Lock for the page table.
+ * @__page_type: Same as page->page_type. Unused for page tables.
+ * @_refcount: Same as page refcount. Used for s390 page tables.
+ * @pt_memcg_data: Memcg data. Tracked for page tables here.
+ *
+ * This struct overlays struct page for now. Do not modify without a good
+ * understanding of the issues.
+ */
+struct ptdesc {
+ unsigned long __page_flags;
+
+ union {
+ struct rcu_head pt_rcu_head;
+ struct list_head pt_list;
+ struct {
+ unsigned long _pt_pad_1;
+ pgtable_t pmd_huge_pte;
+ };
+ };
+ unsigned long __page_mapping;
+
+ union {
+ struct mm_struct *pt_mm;
+ atomic_t pt_frag_refcount;
+ };
+
+ union {
+ unsigned long _pt_pad_2;
+#if ALLOC_SPLIT_PTLOCKS
+ spinlock_t *ptl;
+#else
+ spinlock_t ptl;
+#endif
+ };
+ unsigned int __page_type;
+ atomic_t _refcount;
+#ifdef CONFIG_MEMCG
+ unsigned long pt_memcg_data;
+#endif
+};
+
+#define TABLE_MATCH(pg, pt) \
+ static_assert(offsetof(struct page, pg) == offsetof(struct ptdesc, pt))
+TABLE_MATCH(flags, __page_flags);
+TABLE_MATCH(compound_head, pt_list);
+TABLE_MATCH(compound_head, _pt_pad_1);
+TABLE_MATCH(mapping, __page_mapping);
+TABLE_MATCH(rcu_head, pt_rcu_head);
+TABLE_MATCH(page_type, __page_type);
+TABLE_MATCH(_refcount, _refcount);
+#ifdef CONFIG_MEMCG
+TABLE_MATCH(memcg_data, pt_memcg_data);
+#endif
+#undef TABLE_MATCH
+static_assert(sizeof(struct ptdesc) <= sizeof(struct page));
+
+#define ptdesc_page(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct page *)(pt), \
+ struct ptdesc *: (struct page *)(pt)))
+
+#define ptdesc_folio(pt) (_Generic((pt), \
+ const struct ptdesc *: (const struct folio *)(pt), \
+ struct ptdesc *: (struct folio *)(pt)))
+
+#define page_ptdesc(p) (_Generic((p), \
+ const struct page *: (const struct ptdesc *)(p), \
+ struct page *: (struct ptdesc *)(p)))
+
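
Illustrative conversions (not part of the patch) between the three overlapping views; example_ptdesc_views() is hypothetical.

#include <linux/mm_types.h>
#include <linux/mmdebug.h>

/* Hypothetical: the page, ptdesc and folio views all name the same memory. */
static void example_ptdesc_views(struct page *page)
{
	struct ptdesc *pt = page_ptdesc(page);	/* reinterpret, no copy */
	struct folio *folio = ptdesc_folio(pt);

	VM_BUG_ON_PAGE(ptdesc_page(pt) != page, page);
	(void)folio;
}
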
/*
* Used for sizing the vmemmap region on some architectures
*/
@@ -812,7 +887,7 @@ struct mm_struct {
#ifdef CONFIG_KSM
/*
* Represent how many pages of this process are involved in KSM
- * merging.
+ * merging (not including ksm_zero_pages).
*/
unsigned long ksm_merging_pages;
/*
@@ -820,7 +895,12 @@ struct mm_struct {
* including merged and not merged.
*/
unsigned long ksm_rmap_items;
-#endif
+ /*
+ * Represent how many empty pages are merged with kernel zero
+ * pages when enabling KSM use_zero_pages.
+ */
+ unsigned long ksm_zero_pages;
+#endif /* CONFIG_KSM */
#ifdef CONFIG_LRU_GEN
struct {
/* this mm_struct is on lru_gen_mm_list */
@@ -1105,7 +1185,8 @@ enum vm_fault_reason {
{ VM_FAULT_RETRY, "RETRY" }, \
{ VM_FAULT_FALLBACK, "FALLBACK" }, \
{ VM_FAULT_DONE_COW, "DONE_COW" }, \
- { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }
+ { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" }, \
+ { VM_FAULT_COMPLETED, "COMPLETED" }
struct vm_special_mapping {
const char *name; /* The name, e.g. "[vdso]". */
@@ -1139,14 +1220,6 @@ enum tlb_flush_reason {
NR_TLB_FLUSH_REASONS,
};
- /*
- * A swap entry has to fit into a "unsigned long", as the entry is hidden
- * in the "index" field of the swapper address space.
- */
-typedef struct {
- unsigned long val;
-} swp_entry_t;
-
/**
* enum fault_flag - Fault flag definitions.
* @FAULT_FLAG_WRITE: Fault was a write fault.
diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
index 5414b5c6a103..aa44fff8bb9d 100644
--- a/include/linux/mm_types_task.h
+++ b/include/linux/mm_types_task.h
@@ -52,8 +52,8 @@ struct tlbflush_unmap_batch {
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
/*
* The arch code makes the following promise: generic code can modify a
- * PTE, then call arch_tlbbatch_add_mm() (which internally provides all
- * needed barriers), then call arch_tlbbatch_flush(), and the entries
+ * PTE, then call arch_tlbbatch_add_pending() (which internally provides
+ * all needed barriers), then call arch_tlbbatch_flush(), and the entries
* will be flushed on all CPUs by the time that arch_tlbbatch_flush()
* returns.
*/
diff --git a/include/linux/mman.h b/include/linux/mman.h
index cee1e4b566d8..40d94411d492 100644
--- a/include/linux/mman.h
+++ b/include/linux/mman.h
@@ -15,6 +15,9 @@
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
+#ifndef MAP_ABOVE4G
+#define MAP_ABOVE4G 0
+#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
@@ -50,6 +53,7 @@
| MAP_STACK \
| MAP_HUGETLB \
| MAP_32BIT \
+ | MAP_ABOVE4G \
| MAP_HUGE_2MB \
| MAP_HUGE_1GB)
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index e05e167dbd16..8d38dcb6d044 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -73,6 +73,14 @@ static inline void mmap_assert_write_locked(struct mm_struct *mm)
}
#ifdef CONFIG_PER_VMA_LOCK
+/*
+ * Drop all currently-held per-VMA locks.
+ * This is called from the mmap_lock implementation directly before releasing
+ * a write-locked mmap_lock (or downgrading it to read-locked).
+ * This should normally NOT be called manually from other places.
+ * If you want to call this manually anyway, keep in mind that this will release
+ * *all* VMA write locks, including ones from further up the stack.
+ */
static inline void vma_end_write_all(struct mm_struct *mm)
{
mmap_assert_write_locked(mm);
@@ -118,16 +126,6 @@ static inline int mmap_write_lock_killable(struct mm_struct *mm)
return ret;
}
-static inline bool mmap_write_trylock(struct mm_struct *mm)
-{
- bool ret;
-
- __mmap_lock_trace_start_locking(mm, true);
- ret = down_write_trylock(&mm->mmap_lock) != 0;
- __mmap_lock_trace_acquire_returned(mm, true, ret);
- return ret;
-}
-
static inline void mmap_write_unlock(struct mm_struct *mm)
{
__mmap_lock_trace_released(mm, true);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 64a3e051c3c4..6e3c857606f1 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -187,27 +187,27 @@ struct mmu_notifier_ops {
const struct mmu_notifier_range *range);
/*
- * invalidate_range() is either called between
- * invalidate_range_start() and invalidate_range_end() when the
- * VM has to free pages that where unmapped, but before the
- * pages are actually freed, or outside of _start()/_end() when
- * a (remote) TLB is necessary.
+ * arch_invalidate_secondary_tlbs() is used to manage a non-CPU TLB
+ * which shares page-tables with the CPU. The
+ * invalidate_range_start()/end() callbacks should not be implemented as
+ * invalidate_secondary_tlbs() already catches the points in time when
+ * an external TLB needs to be flushed.
*
- * If invalidate_range() is used to manage a non-CPU TLB with
- * shared page-tables, it not necessary to implement the
- * invalidate_range_start()/end() notifiers, as
- * invalidate_range() already catches the points in time when an
- * external TLB range needs to be flushed. For more in depth
- * discussion on this see Documentation/mm/mmu_notifier.rst
+ * This requires arch_invalidate_secondary_tlbs() to be called while
+ * holding the ptl spin-lock and therefore this callback is not allowed
+ * to sleep.
*
- * Note that this function might be called with just a sub-range
- * of what was passed to invalidate_range_start()/end(), if
- * called between those functions.
+ * This is called by architecture code whenever invalidating a TLB
+ * entry. It is assumed that any secondary TLB has the same rules for
+ * when invalidations are required. If this is not the case architecture
+ * code will need to call this explicitly when required for secondary
+ * TLB invalidation.
*/
- void (*invalidate_range)(struct mmu_notifier *subscription,
- struct mm_struct *mm,
- unsigned long start,
- unsigned long end);
+ void (*arch_invalidate_secondary_tlbs)(
+ struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -395,10 +395,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
extern int __mmu_notifier_invalidate_range_start(struct mmu_notifier_range *r);
-extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r,
- bool only_end);
-extern void __mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+extern void __mmu_notifier_invalidate_range_end(struct mmu_notifier_range *r);
+extern void __mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
extern bool
mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range);
@@ -481,21 +480,14 @@ mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
might_sleep();
if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, false);
-}
-
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
- if (mm_has_notifiers(range->mm))
- __mmu_notifier_invalidate_range_end(range, true);
+ __mmu_notifier_invalidate_range_end(range);
}
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
- unsigned long start, unsigned long end)
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range(mm, start, end);
+ __mmu_notifier_arch_invalidate_secondary_tlbs(mm, start, end);
}
static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
@@ -582,45 +574,6 @@ static inline void mmu_notifier_range_init_owner(
__young; \
})
-#define ptep_clear_flush_notify(__vma, __address, __ptep) \
-({ \
- unsigned long ___addr = __address & PAGE_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pte_t ___pte; \
- \
- ___pte = ptep_clear_flush(__vma, __address, __ptep); \
- mmu_notifier_invalidate_range(___mm, ___addr, \
- ___addr + PAGE_SIZE); \
- \
- ___pte; \
-})
-
-#define pmdp_huge_clear_flush_notify(__vma, __haddr, __pmd) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PMD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pmd_t ___pmd; \
- \
- ___pmd = pmdp_huge_clear_flush(__vma, __haddr, __pmd); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PMD_SIZE); \
- \
- ___pmd; \
-})
-
-#define pudp_huge_clear_flush_notify(__vma, __haddr, __pud) \
-({ \
- unsigned long ___haddr = __haddr & HPAGE_PUD_MASK; \
- struct mm_struct *___mm = (__vma)->vm_mm; \
- pud_t ___pud; \
- \
- ___pud = pudp_huge_clear_flush(__vma, __haddr, __pud); \
- mmu_notifier_invalidate_range(___mm, ___haddr, \
- ___haddr + HPAGE_PUD_SIZE); \
- \
- ___pud; \
-})
-
/*
* set_pte_at_notify() sets the pte _after_ running the notifier.
* This is safe to start by updating the secondary MMUs, because the primary MMU
@@ -711,12 +664,7 @@ void mmu_notifier_invalidate_range_end(struct mmu_notifier_range *range)
{
}
-static inline void
-mmu_notifier_invalidate_range_only_end(struct mmu_notifier_range *range)
-{
-}
-
-static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
+static inline void mmu_notifier_arch_invalidate_secondary_tlbs(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5e50b78d58ea..4106fbc5b4b3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -676,7 +676,6 @@ enum zone_watermarks {
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
-/* Fields and list protected by pagesets local_lock in page_alloc.c */
struct per_cpu_pages {
spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
index 7ace7ba30203..1269543d0634 100644
--- a/include/linux/module_symbol.h
+++ b/include/linux/module_symbol.h
@@ -9,9 +9,7 @@ static inline int is_mapping_symbol(const char *str)
return true;
if (str[0] == 'L' && str[1] == '0')
return true;
- return str[0] == '$' &&
- (str[1] == 'a' || str[1] == 'd' || str[1] == 't' || str[1] == 'x')
- && (str[2] == '\0' || str[2] == '.');
+ return str[0] == '$';
}
#endif /* _LINUX_MODULE_SYMBOL_H */
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 03be088fb439..001b2ce83832 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -42,6 +42,11 @@ bool module_init_section(const char *name);
*/
bool module_exit_section(const char *name);
+/* Describes whether within_module_init() will consider this an init section
+ * or not. This behaviour changes with CONFIG_MODULE_UNLOAD.
+ */
+bool module_init_layout_section(const char *sname);
+
/*
* Apply the given relocation to the (simplified) ELF. Return -error
* or 0.
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 7c58c44662b8..914a9f974baa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -379,7 +379,7 @@ struct mtd_info {
struct module *owner;
struct device dev;
- int usecount;
+ struct kref refcnt;
struct mtd_debug_info dbg;
struct nvmem_device *nvmem;
struct nvmem_device *otp_user_nvmem;
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 5159d692f9ce..90a141ba2a5a 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -1540,6 +1540,7 @@ int nand_reset_op(struct nand_chip *chip);
int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
unsigned int len);
int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_exit_status_op(struct nand_chip *chip);
int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
int nand_read_page_op(struct nand_chip *chip, unsigned int page,
unsigned int offset_in_page, void *buf, unsigned int len);
diff --git a/include/linux/net_mm.h b/include/linux/net_mm.h
deleted file mode 100644
index b298998bd5a0..000000000000
--- a/include/linux/net_mm.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-#ifdef CONFIG_MMU
-
-#ifdef CONFIG_INET
-extern const struct vm_operations_struct tcp_vm_ops;
-static inline bool vma_is_tcp(const struct vm_area_struct *vma)
-{
- return vma->vm_ops == &tcp_vm_ops;
-}
-#else
-static inline bool vma_is_tcp(const struct vm_area_struct *vma)
-{
- return false;
-}
-#endif /* CONFIG_INET*/
-
-#endif /* CONFIG_MMU */
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20eeba8b009d..cd628c4b011e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -48,6 +48,7 @@ struct nfs_client {
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index aa9f4c6ebe26..1c315f854ea8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -157,7 +157,9 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index e3e6a64b98e0..e92e378df000 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -157,31 +157,31 @@ static inline void touch_nmi_watchdog(void)
#ifdef arch_trigger_cpumask_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, false);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, -1);
return true;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
- arch_trigger_cpumask_backtrace(cpu_online_mask, true);
+ arch_trigger_cpumask_backtrace(cpu_online_mask, exclude_cpu);
return true;
}
static inline bool trigger_cpumask_backtrace(struct cpumask *mask)
{
- arch_trigger_cpumask_backtrace(mask, false);
+ arch_trigger_cpumask_backtrace(mask, -1);
return true;
}
static inline bool trigger_single_cpu_backtrace(int cpu)
{
- arch_trigger_cpumask_backtrace(cpumask_of(cpu), false);
+ arch_trigger_cpumask_backtrace(cpumask_of(cpu), -1);
return true;
}
/* generic implementation */
void nmi_trigger_cpumask_backtrace(const cpumask_t *mask,
- bool exclude_self,
+ int exclude_cpu,
void (*raise)(cpumask_t *mask));
bool nmi_cpu_backtrace(struct pt_regs *regs);
@@ -190,7 +190,7 @@ static inline bool trigger_all_cpu_backtrace(void)
{
return false;
}
-static inline bool trigger_allbutself_cpu_backtrace(void)
+static inline bool trigger_allbutcpu_cpu_backtrace(int exclude_cpu)
{
return false;
}
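
trigger_allbutself_cpu_backtrace() becomes trigger_allbutcpu_cpu_backtrace(exclude_cpu), and the bool exclude_self argument of nmi_trigger_cpumask_backtrace() becomes an int CPU number, with -1 meaning "exclude nobody". A small sketch of a caller that skips one chosen CPU; the calling function is illustrative:

#include <linux/nmi.h>
#include <linux/printk.h>

/* Illustrative: backtrace every online CPU except @skip_cpu (-1 = none). */
static void example_dump_other_cpus(int skip_cpu)
{
	if (!trigger_allbutcpu_cpu_backtrace(skip_cpu))
		pr_info("cpu backtrace not supported on this architecture\n");
}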
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index fa030d93b768..4523e4e83319 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -43,6 +43,8 @@ enum {
NVMEM_REMOVE,
NVMEM_CELL_ADD,
NVMEM_CELL_REMOVE,
+ NVMEM_LAYOUT_ADD,
+ NVMEM_LAYOUT_REMOVE,
};
#if IS_ENABLED(CONFIG_NVMEM)
@@ -256,7 +258,7 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np,
static inline struct device_node *
of_nvmem_layout_get_container(struct nvmem_device *nvmem)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
#endif /* CONFIG_NVMEM && CONFIG_OF */
diff --git a/include/linux/of.h b/include/linux/of.h
index 6ecde0515677..6a9ddf20e79a 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -96,10 +96,12 @@ struct of_reconfig_data {
struct property *old_prop;
};
+extern const struct kobj_type of_node_ktype;
+extern const struct fwnode_operations of_fwnode_ops;
+
/**
* of_node_init - initialize a devicetree node
* @node: Pointer to device node that has been created by kzalloc()
- * @phandle_name: Name of property holding a phandle value
*
* On return the device_node refcount is set to one. Use of_node_put()
* on @node when done to free the memory allocated for it. If the node
@@ -107,9 +109,6 @@ struct of_reconfig_data {
* whether to free the memory will be done by node->release(), which is
* of_node_release().
*/
-/* initialize a node */
-extern const struct kobj_type of_node_ktype;
-extern const struct fwnode_operations of_fwnode_ops;
static inline void of_node_init(struct device_node *node)
{
#if defined(CONFIG_OF_KOBJ)
@@ -1580,6 +1579,29 @@ static inline int of_changeset_update_property(struct of_changeset *ocs,
{
return of_changeset_action(ocs, OF_RECONFIG_UPDATE_PROPERTY, np, prop);
}
+
+struct device_node *of_changeset_create_node(struct of_changeset *ocs,
+ struct device_node *parent,
+ const char *full_name);
+int of_changeset_add_prop_string(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name, const char *str);
+int of_changeset_add_prop_string_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const char **str_array, size_t sz);
+int of_changeset_add_prop_u32_array(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 *array, size_t sz);
+static inline int of_changeset_add_prop_u32(struct of_changeset *ocs,
+ struct device_node *np,
+ const char *prop_name,
+ const u32 val)
+{
+ return of_changeset_add_prop_u32_array(ocs, np, prop_name, &val, 1);
+}
+
#else /* CONFIG_OF_DYNAMIC */
static inline int of_reconfig_notifier_register(struct notifier_block *nb)
{
@@ -1645,7 +1667,7 @@ struct of_overlay_notify_data {
#ifdef CONFIG_OF_OVERLAY
int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id);
+ int *ovcs_id, struct device_node *target_base);
int of_overlay_remove(int *ovcs_id);
int of_overlay_remove_all(void);
@@ -1654,8 +1676,8 @@ int of_overlay_notifier_unregister(struct notifier_block *nb);
#else
-static inline int of_overlay_fdt_apply(void *overlay_fdt, u32 overlay_fdt_size,
- int *ovcs_id)
+static inline int of_overlay_fdt_apply(const void *overlay_fdt, u32 overlay_fdt_size,
+ int *ovcs_id, struct device_node *target_base)
{
return -ENOTSUPP;
}
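
of_changeset_create_node() and the of_changeset_add_prop_*() helpers allow a new node and its properties to be built entirely inside a changeset. A minimal sketch under CONFIG_OF_DYNAMIC; the node name, property and error unwinding are illustrative only:

#include <linux/errno.h>
#include <linux/of.h>

static int example_add_sensor_node(struct device_node *parent)
{
	struct of_changeset ocs;
	struct device_node *np;
	int ret;

	of_changeset_init(&ocs);

	np = of_changeset_create_node(&ocs, parent, "sensor@0");
	if (!np)
		return -ENOMEM;

	ret = of_changeset_add_prop_u32(&ocs, np, "reg", 0);
	if (!ret)
		ret = of_changeset_apply(&ocs);

	of_node_put(np);	/* drop the creation reference; unwinding elided */
	return ret;
}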
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index d8045bcfc35e..fadfea575485 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -127,10 +127,4 @@ static inline int devm_of_platform_populate(struct device *dev)
static inline void devm_of_platform_depopulate(struct device *dev) { }
#endif
-#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
-extern void of_platform_register_reconfig_notifier(void);
-#else
-static inline void of_platform_register_reconfig_notifier(void) { }
-#endif
-
#endif /* _LINUX_OF_PLATFORM_H */
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index 0f4a8903922a..f86a08ba0207 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -67,6 +67,7 @@ enum OID {
OID_msOutlookExpress, /* 1.3.6.1.4.1.311.16.4 */
OID_ntlmssp, /* 1.3.6.1.4.1.311.2.2.10 */
+ OID_negoex, /* 1.3.6.1.4.1.311.2.2.30 */
OID_spnego, /* 1.3.6.1.5.5.2 */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 92a2063a0a23..5c02720c53a5 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -99,13 +99,15 @@
*/
enum pageflags {
PG_locked, /* Page is locked. Don't touch. */
+ PG_writeback, /* Page is under writeback */
PG_referenced,
PG_uptodate,
PG_dirty,
PG_lru,
+ PG_head, /* Must be in bit 6 */
+ PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_active,
PG_workingset,
- PG_waiters, /* Page has waiters, check its waitqueue. Must be bit #7 and in the same byte as "PG_locked" */
PG_error,
PG_slab,
PG_owner_priv_1, /* Owner use. If pagecache, fs may use*/
@@ -113,8 +115,6 @@ enum pageflags {
PG_reserved,
PG_private, /* If pagecache, has fs-private data */
PG_private_2, /* If pagecache, has fs aux data */
- PG_writeback, /* Page is under writeback */
- PG_head, /* A head page */
PG_mappedtodisk, /* Has blocks allocated on-disk */
PG_reclaim, /* To be reclaimed asap */
PG_swapbacked, /* Page is backed by RAM/swap */
@@ -171,15 +171,6 @@ enum pageflags {
/* Remapped by swiotlb-xen. */
PG_xen_remapped = PG_owner_priv_1,
-#ifdef CONFIG_MEMORY_FAILURE
- /*
- * Compound pages. Stored in first tail page's flags.
- * Indicates that at least one subpage is hwpoisoned in the
- * THP.
- */
- PG_has_hwpoisoned = PG_error,
-#endif
-
/* non-lru isolated movable page */
PG_isolated = PG_reclaim,
@@ -190,6 +181,17 @@ enum pageflags {
/* For self-hosted memmap pages */
PG_vmemmap_self_hosted = PG_owner_priv_1,
#endif
+
+ /*
+ * Flags only valid for compound pages. Stored in first tail page's
+ * flags word. Cannot use the first 8 flags or any flag marked as
+ * PF_ANY.
+ */
+
+ /* At least one page in this folio has the hwpoison flag set */
+ PG_has_hwpoisoned = PG_error,
+ PG_hugetlb = PG_active,
+ PG_large_rmappable = PG_workingset, /* anon or file-backed */
};
#define PAGEFLAGS_MASK ((1UL << NR_PAGEFLAGS) - 1)
@@ -806,13 +808,32 @@ static inline void ClearPageCompound(struct page *page)
BUG_ON(!PageHead(page));
ClearPageHead(page);
}
+PAGEFLAG(LargeRmappable, large_rmappable, PF_SECOND)
+#else
+TESTPAGEFLAG_FALSE(LargeRmappable, large_rmappable)
#endif
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
int PageHuge(struct page *page);
-bool folio_test_hugetlb(struct folio *folio);
+SETPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+CLEARPAGEFLAG(HugeTLB, hugetlb, PF_SECOND)
+
+/**
+ * folio_test_hugetlb - Determine if the folio belongs to hugetlbfs
+ * @folio: The folio to test.
+ *
+ * Context: Any context. Caller should have a reference on the folio to
+ * prevent it from being turned into a tail page.
+ * Return: True for hugetlbfs folios, false for anon folios or folios
+ * belonging to other filesystems.
+ */
+static inline bool folio_test_hugetlb(struct folio *folio)
+{
+ return folio_test_large(folio) &&
+ test_bit(PG_hugetlb, folio_flags(folio, 1));
+}
#else
TESTPAGEFLAG_FALSE(Huge, hugetlb)
#endif
@@ -832,11 +853,6 @@ static inline int PageTransHuge(struct page *page)
return PageHead(page);
}
-static inline bool folio_test_transhuge(struct folio *folio)
-{
- return folio_test_head(folio);
-}
-
/*
* PageTransCompound returns true for both transparent huge pages
* and hugetlbfs pages, so it should only be called when it's known
@@ -908,6 +924,8 @@ static inline bool is_page_hwpoison(struct page *page)
#define PageType(page, flag) \
((page->page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
+#define folio_test_type(folio, flag) \
+ ((folio->page.page_type & (PAGE_TYPE_BASE | flag)) == PAGE_TYPE_BASE)
static inline int page_type_has_type(unsigned int page_type)
{
@@ -919,27 +937,41 @@ static inline int page_has_type(struct page *page)
return page_type_has_type(page->page_type);
}
-#define PAGE_TYPE_OPS(uname, lname) \
-static __always_inline int Page##uname(struct page *page) \
+#define PAGE_TYPE_OPS(uname, lname, fname) \
+static __always_inline int Page##uname(const struct page *page) \
{ \
return PageType(page, PG_##lname); \
} \
+static __always_inline int folio_test_##fname(const struct folio *folio)\
+{ \
+ return folio_test_type(folio, PG_##lname); \
+} \
static __always_inline void __SetPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!PageType(page, 0), page); \
page->page_type &= ~PG_##lname; \
} \
+static __always_inline void __folio_set_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_type(folio, 0), folio); \
+ folio->page.page_type &= ~PG_##lname; \
+} \
static __always_inline void __ClearPage##uname(struct page *page) \
{ \
VM_BUG_ON_PAGE(!Page##uname(page), page); \
page->page_type |= PG_##lname; \
-}
+} \
+static __always_inline void __folio_clear_##fname(struct folio *folio) \
+{ \
+ VM_BUG_ON_FOLIO(!folio_test_##fname(folio), folio); \
+ folio->page.page_type |= PG_##lname; \
+} \
/*
* PageBuddy() indicates that the page is free and in the buddy system
* (see mm/page_alloc.c).
*/
-PAGE_TYPE_OPS(Buddy, buddy)
+PAGE_TYPE_OPS(Buddy, buddy, buddy)
/*
* PageOffline() indicates that the page is logically offline although the
@@ -963,7 +995,7 @@ PAGE_TYPE_OPS(Buddy, buddy)
* pages should check PageOffline() and synchronize with such drivers using
* page_offline_freeze()/page_offline_thaw().
*/
-PAGE_TYPE_OPS(Offline, offline)
+PAGE_TYPE_OPS(Offline, offline, offline)
extern void page_offline_freeze(void);
extern void page_offline_thaw(void);
@@ -973,12 +1005,12 @@ extern void page_offline_end(void);
/*
* Marks pages in use as page tables.
*/
-PAGE_TYPE_OPS(Table, table)
+PAGE_TYPE_OPS(Table, table, pgtable)
/*
* Marks guardpages used with debug_pagealloc.
*/
-PAGE_TYPE_OPS(Guard, guard)
+PAGE_TYPE_OPS(Guard, guard, guard)
extern bool is_free_buddy_page(struct page *page);
@@ -1040,6 +1072,14 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
#define PAGE_FLAGS_CHECK_AT_PREP \
((PAGEFLAGS_MASK & ~__PG_HWPOISON) | LRU_GEN_MASK | LRU_REFS_MASK)
+/*
+ * Flags stored in the second page of a compound page. They may overlap
+ * the CHECK_AT_FREE flags above, so need to be cleared.
+ */
+#define PAGE_FLAGS_SECOND \
+ (0xffUL /* order */ | 1UL << PG_has_hwpoisoned | \
+ 1UL << PG_hugetlb | 1UL << PG_large_rmappable)
+
#define PAGE_FLAGS_PRIVATE \
(1UL << PG_private | 1UL << PG_private_2)
/**
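
folio_test_hugetlb() is now an inline test of PG_hugetlb in the first tail page's flags rather than an out-of-line call, so the caller must hold a folio reference to keep the head/tail layout stable, as the new kernel-doc states. A trivial sketch; the helper below is illustrative:

#include <linux/mm_types.h>
#include <linux/page-flags.h>

/* Caller is assumed to already hold a reference on @folio. */
static const char *example_folio_kind(struct folio *folio)
{
	if (folio_test_hugetlb(folio))
		return "hugetlb";
	if (folio_test_large(folio))
		return "large";
	return "small";
}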
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index 67314f648aeb..be98564191e6 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -8,6 +8,7 @@
struct pglist_data;
+#ifdef CONFIG_PAGE_EXTENSION
/**
* struct page_ext_operations - per page_ext client operations
* @offset: Offset to the client's data within page_ext. Offset is returned to
@@ -29,8 +30,6 @@ struct page_ext_operations {
bool need_shared_flags;
};
-#ifdef CONFIG_PAGE_EXTENSION
-
/*
* The page_ext_flags users must set need_shared_flags to true.
*/
@@ -82,6 +81,12 @@ static inline void page_ext_init(void)
extern struct page_ext *page_ext_get(struct page *page);
extern void page_ext_put(struct page_ext *page_ext);
+static inline void *page_ext_data(struct page_ext *page_ext,
+ struct page_ext_operations *ops)
+{
+ return (void *)(page_ext) + ops->offset;
+}
+
static inline struct page_ext *page_ext_next(struct page_ext *curr)
{
void *next = curr;
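
page_ext_data() hands a page_ext client its per-page data by adding the client's registered offset, replacing open-coded pointer arithmetic. A sketch with a made-up client structure and operations object:

#include <linux/mm_types.h>
#include <linux/page_ext.h>

struct example_ext_data {
	unsigned long tag;
};

extern struct page_ext_operations example_ext_ops;	/* assumed client ops */

static unsigned long example_read_tag(struct page *page)
{
	struct page_ext *page_ext = page_ext_get(page);
	unsigned long tag = 0;

	if (page_ext) {
		struct example_ext_data *data =
			page_ext_data(page_ext, &example_ext_ops);

		tag = data->tag;
		page_ext_put(page_ext);
	}
	return tag;
}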
diff --git a/include/linux/page_idle.h b/include/linux/page_idle.h
index 5cb7bd2078ec..d8f344840643 100644
--- a/include/linux/page_idle.h
+++ b/include/linux/page_idle.h
@@ -144,9 +144,4 @@ static inline void set_page_idle(struct page *page)
{
folio_set_idle(page_folio(page));
}
-
-static inline void clear_page_idle(struct page *page)
-{
- folio_clear_idle(page_folio(page));
-}
#endif /* _LINUX_MM_PAGE_IDLE_H */
diff --git a/include/linux/page_table_check.h b/include/linux/page_table_check.h
index 01e16c7696ec..6722941c7cb8 100644
--- a/include/linux/page_table_check.h
+++ b/include/linux/page_table_check.h
@@ -14,18 +14,13 @@ extern struct static_key_true page_table_check_disabled;
extern struct page_ext_operations page_table_check_ops;
void __page_table_check_zero(struct page *page, unsigned int order);
-void __page_table_check_pte_clear(struct mm_struct *mm, unsigned long addr,
- pte_t pte);
-void __page_table_check_pmd_clear(struct mm_struct *mm, unsigned long addr,
- pmd_t pmd);
-void __page_table_check_pud_clear(struct mm_struct *mm, unsigned long addr,
- pud_t pud);
-void __page_table_check_pte_set(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte);
-void __page_table_check_pmd_set(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, pmd_t pmd);
-void __page_table_check_pud_set(struct mm_struct *mm, unsigned long addr,
- pud_t *pudp, pud_t pud);
+void __page_table_check_pte_clear(struct mm_struct *mm, pte_t pte);
+void __page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd);
+void __page_table_check_pud_clear(struct mm_struct *mm, pud_t pud);
+void __page_table_check_ptes_set(struct mm_struct *mm, pte_t *ptep, pte_t pte,
+ unsigned int nr);
+void __page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp, pmd_t pmd);
+void __page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp, pud_t pud);
void __page_table_check_pte_clear_range(struct mm_struct *mm,
unsigned long addr,
pmd_t pmd);
@@ -46,61 +41,55 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
__page_table_check_zero(page, order);
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm,
- unsigned long addr, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_clear(mm, addr, pte);
+ __page_table_check_pte_clear(mm, pte);
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_clear(mm, addr, pmd);
+ __page_table_check_pmd_clear(mm, pmd);
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm,
- unsigned long addr, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_clear(mm, addr, pud);
+ __page_table_check_pud_clear(mm, pud);
}
-static inline void page_table_check_pte_set(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep,
- pte_t pte)
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pte_set(mm, addr, ptep, pte);
+ __page_table_check_ptes_set(mm, ptep, pte, nr);
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp,
+static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
pmd_t pmd)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pmd_set(mm, addr, pmdp, pmd);
+ __page_table_check_pmd_set(mm, pmdp, pmd);
}
-static inline void page_table_check_pud_set(struct mm_struct *mm,
- unsigned long addr, pud_t *pudp,
+static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
pud_t pud)
{
if (static_branch_likely(&page_table_check_disabled))
return;
- __page_table_check_pud_set(mm, addr, pudp, pud);
+ __page_table_check_pud_set(mm, pudp, pud);
}
static inline void page_table_check_pte_clear_range(struct mm_struct *mm,
@@ -123,35 +112,29 @@ static inline void page_table_check_free(struct page *page, unsigned int order)
{
}
-static inline void page_table_check_pte_clear(struct mm_struct *mm,
- unsigned long addr, pte_t pte)
+static inline void page_table_check_pte_clear(struct mm_struct *mm, pte_t pte)
{
}
-static inline void page_table_check_pmd_clear(struct mm_struct *mm,
- unsigned long addr, pmd_t pmd)
+static inline void page_table_check_pmd_clear(struct mm_struct *mm, pmd_t pmd)
{
}
-static inline void page_table_check_pud_clear(struct mm_struct *mm,
- unsigned long addr, pud_t pud)
+static inline void page_table_check_pud_clear(struct mm_struct *mm, pud_t pud)
{
}
-static inline void page_table_check_pte_set(struct mm_struct *mm,
- unsigned long addr, pte_t *ptep,
- pte_t pte)
+static inline void page_table_check_ptes_set(struct mm_struct *mm,
+ pte_t *ptep, pte_t pte, unsigned int nr)
{
}
-static inline void page_table_check_pmd_set(struct mm_struct *mm,
- unsigned long addr, pmd_t *pmdp,
+static inline void page_table_check_pmd_set(struct mm_struct *mm, pmd_t *pmdp,
pmd_t pmd)
{
}
-static inline void page_table_check_pud_set(struct mm_struct *mm,
- unsigned long addr, pud_t *pudp,
+static inline void page_table_check_pud_set(struct mm_struct *mm, pud_t *pudp,
pud_t pud)
{
}
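
The page_table_check hooks drop the unused address argument, and the PTE-set hook becomes page_table_check_ptes_set(), covering @nr consecutive entries in one call to match set_ptes() batching. A simplified illustration of such a caller; the loop below is not any architecture's real set_ptes() implementation:

#include <linux/mm_types.h>
#include <linux/page_table_check.h>
#include <linux/pgtable.h>

static void example_set_ptes(struct mm_struct *mm, pte_t *ptep, pte_t pte,
			     unsigned int nr)
{
	unsigned int i;

	/* One check call covers all @nr entries about to be installed. */
	page_table_check_ptes_set(mm, ptep, pte, nr);

	for (i = 0; i < nr; i++)
		set_pte(ptep + i, pte);	/* a real caller advances the pfn per entry */
}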
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index d87840acbfb2..351c3b7f93a1 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -203,6 +203,7 @@ enum mapping_flags {
/* writeback related tags are not used */
AS_NO_WRITEBACK_TAGS = 5,
AS_LARGE_FOLIO_SUPPORT = 6,
+ AS_RELEASE_ALWAYS, /* Call ->release_folio(), even if no private data */
};
/**
@@ -273,6 +274,21 @@ static inline int mapping_use_writeback_tags(struct address_space *mapping)
return !test_bit(AS_NO_WRITEBACK_TAGS, &mapping->flags);
}
+static inline bool mapping_release_always(const struct address_space *mapping)
+{
+ return test_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_set_release_always(struct address_space *mapping)
+{
+ set_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
+static inline void mapping_clear_release_always(struct address_space *mapping)
+{
+ clear_bit(AS_RELEASE_ALWAYS, &mapping->flags);
+}
+
static inline gfp_t mapping_gfp_mask(struct address_space * mapping)
{
return mapping->gfp_mask;
@@ -373,23 +389,31 @@ static inline struct address_space *folio_file_mapping(struct folio *folio)
return folio->mapping;
}
-static inline struct address_space *page_file_mapping(struct page *page)
-{
- return folio_file_mapping(page_folio(page));
-}
-
-/*
- * For file cache pages, return the address_space, otherwise return NULL
+/**
+ * folio_flush_mapping - Find the file mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to. Anonymous folios return NULL, even if they're in
+ * the swap cache. Other kinds of folio also return NULL.
+ *
+ * This is ONLY used by architecture cache flushing code. If you aren't
+ * writing cache flushing code, you want either folio_mapping() or
+ * folio_file_mapping().
*/
-static inline struct address_space *page_mapping_file(struct page *page)
+static inline struct address_space *folio_flush_mapping(struct folio *folio)
{
- struct folio *folio = page_folio(page);
-
if (unlikely(folio_test_swapcache(folio)))
return NULL;
+
return folio_mapping(folio);
}
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+ return folio_file_mapping(page_folio(page));
+}
+
/**
* folio_inode - Get the host inode for this folio.
* @folio: The folio.
@@ -960,8 +984,7 @@ static inline bool wake_page_match(struct wait_page_queue *wait_page,
void __folio_lock(struct folio *folio);
int __folio_lock_killable(struct folio *folio);
-bool __folio_lock_or_retry(struct folio *folio, struct mm_struct *mm,
- unsigned int flags);
+vm_fault_t __folio_lock_or_retry(struct folio *folio, struct vm_fault *vmf);
void unlock_page(struct page *page);
void folio_unlock(struct folio *folio);
@@ -1065,11 +1088,13 @@ static inline int folio_lock_killable(struct folio *folio)
* Return value and mmap_lock implications depend on flags; see
* __folio_lock_or_retry().
*/
-static inline bool folio_lock_or_retry(struct folio *folio,
- struct mm_struct *mm, unsigned int flags)
+static inline vm_fault_t folio_lock_or_retry(struct folio *folio,
+ struct vm_fault *vmf)
{
might_sleep();
- return folio_trylock(folio) || __folio_lock_or_retry(folio, mm, flags);
+ if (!folio_trylock(folio))
+ return __folio_lock_or_retry(folio, vmf);
+ return 0;
}
/*
@@ -1104,11 +1129,6 @@ static inline void wait_on_page_locked(struct page *page)
folio_wait_locked(page_folio(page));
}
-static inline int wait_on_page_locked_killable(struct page *page)
-{
- return folio_wait_locked_killable(page_folio(page));
-}
-
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
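
AS_RELEASE_ALWAYS lets a filesystem request that ->release_folio() be called even when a folio carries no private data, and folio_lock_or_retry() now takes the vm_fault and returns a vm_fault_t. A one-line sketch of opting a mapping in at inode setup time; the function is illustrative:

#include <linux/fs.h>
#include <linux/pagemap.h>

static void example_fs_init_mapping(struct inode *inode)
{
	/* Ask the VM to call ->release_folio() even without folio private data. */
	mapping_set_release_always(inode->i_mapping);
}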
diff --git a/include/linux/pci.h b/include/linux/pci.h
index c69a2cc1f412..8c7c2c3c6c65 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -23,7 +23,7 @@
#ifndef LINUX_PCI_H
#define LINUX_PCI_H
-
+#include <linux/args.h>
#include <linux/mod_devicetable.h>
#include <linux/types.h>
@@ -366,8 +366,8 @@ struct pci_dev {
pci_power_t current_state; /* Current operating state. In ACPI,
this is D0-D3, D0 being fully
functional, and D3 being off. */
- unsigned int imm_ready:1; /* Supports Immediate Readiness */
u8 pm_cap; /* PM capability offset */
+ unsigned int imm_ready:1; /* Supports Immediate Readiness */
unsigned int pme_support:5; /* Bitmask of states from which PME#
can be generated */
unsigned int pme_poll:1; /* Poll device's PME status bit */
@@ -392,9 +392,9 @@ struct pci_dev {
#ifdef CONFIG_PCIEASPM
struct pcie_link_state *link_state; /* ASPM link state */
+ u16 l1ss; /* L1SS Capability pointer */
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
- u16 l1ss; /* L1SS Capability pointer */
#endif
unsigned int pasid_no_tlp:1; /* PASID works without TLP Prefix */
unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
@@ -464,12 +464,13 @@ struct pci_dev {
unsigned int no_vf_scan:1; /* Don't scan for VFs after IOV enablement */
unsigned int no_command_memory:1; /* No PCI_COMMAND_MEMORY */
unsigned int rom_bar_overlap:1; /* ROM BAR disable broken */
+ unsigned int rom_attr_enabled:1; /* Display of ROM attribute enabled? */
pci_dev_flags_t dev_flags;
atomic_t enable_cnt; /* pci_enable_device has been called */
+ spinlock_t pcie_cap_lock; /* Protects RMW ops in capability accessors */
u32 saved_config_space[16]; /* Config space saved at suspend time */
struct hlist_head saved_cap_space;
- int rom_attr_enabled; /* Display of ROM attribute enabled? */
struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */
struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */
@@ -1217,11 +1218,40 @@ int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val);
int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val);
-int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
- u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_unlocked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
+int pcie_capability_clear_and_set_word_locked(struct pci_dev *dev, int pos,
+ u16 clear, u16 set);
int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
u32 clear, u32 set);
+/**
+ * pcie_capability_clear_and_set_word - RMW accessor for PCI Express Capability Registers
+ * @dev: PCI device structure of the PCI Express device
+ * @pos: PCI Express Capability Register
+ * @clear: Clear bitmask
+ * @set: Set bitmask
+ *
+ * Perform a Read-Modify-Write (RMW) operation using @clear and @set
+ * bitmasks on PCI Express Capability Register at @pos. Certain PCI Express
+ * Capability Registers are accessed concurrently in RMW fashion, hence
+ * require locking which is handled transparently to the caller.
+ */
+static inline int pcie_capability_clear_and_set_word(struct pci_dev *dev,
+ int pos,
+ u16 clear, u16 set)
+{
+ switch (pos) {
+ case PCI_EXP_LNKCTL:
+ case PCI_EXP_RTCTL:
+ return pcie_capability_clear_and_set_word_locked(dev, pos,
+ clear, set);
+ default:
+ return pcie_capability_clear_and_set_word_unlocked(dev, pos,
+ clear, set);
+ }
+}
+
static inline int pcie_capability_set_word(struct pci_dev *dev, int pos,
u16 set)
{
@@ -1403,7 +1433,6 @@ void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus);
int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type);
-void pdev_enable_device(struct pci_dev *);
int pci_enable_resources(struct pci_dev *, int mask);
void pci_assign_irq(struct pci_dev *dev);
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
@@ -2260,6 +2289,11 @@ int pcibios_alloc_irq(struct pci_dev *dev);
void pcibios_free_irq(struct pci_dev *dev);
resource_size_t pcibios_default_alignment(void);
+#if !defined(HAVE_PCI_MMAP) && !defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)
+extern int pci_create_resource_files(struct pci_dev *dev);
+extern void pci_remove_resource_files(struct pci_dev *dev);
+#endif
+
#if defined(CONFIG_PCI_MMCONFIG) || defined(CONFIG_ACPI_MCFG)
void __init pci_mmcfg_early_init(void);
void __init pci_mmcfg_late_init(void);
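
pcie_capability_clear_and_set_word() is now a wrapper that routes Link Control and Root Control read-modify-writes through a locked variant guarded by the new pcie_cap_lock, while other registers keep the unlocked path, so existing callers are unchanged. A small usage sketch; the register masks are the standard ones from pci_regs.h and the function is illustrative:

#include <linux/pci.h>

static int example_disable_aspm_l1(struct pci_dev *dev)
{
	/* LNKCTL is one of the registers serialized via pcie_cap_lock. */
	return pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
						  PCI_EXP_LNKCTL_ASPM_L1, 0);
}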
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 8f9a459e1671..5fb3d4c393a9 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1762,6 +1762,10 @@
#define PCI_SUBDEVICE_ID_AT_2700FX 0x2701
#define PCI_SUBDEVICE_ID_AT_2701FX 0x2703
+#define PCI_VENDOR_ID_ASIX 0x125b
+#define PCI_DEVICE_ID_ASIX_AX99100 0x9100
+#define PCI_DEVICE_ID_ASIX_AX99100_LB 0x9110
+
#define PCI_VENDOR_ID_ESS 0x125d
#define PCI_DEVICE_ID_ESS_ESS1968 0x1968
#define PCI_DEVICE_ID_ESS_ESS1978 0x1978
@@ -2646,6 +2650,7 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
+#define PCI_DEVICE_ID_INTEL_HDA_CML_LP 0x02c8
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
@@ -2661,8 +2666,10 @@
#define PCI_DEVICE_ID_INTEL_82424 0x0483
#define PCI_DEVICE_ID_INTEL_82378 0x0484
#define PCI_DEVICE_ID_INTEL_82425 0x0486
+#define PCI_DEVICE_ID_INTEL_HDA_CML_H 0x06c8
#define PCI_DEVICE_ID_INTEL_MRST_SD0 0x0807
#define PCI_DEVICE_ID_INTEL_MRST_SD1 0x0808
+#define PCI_DEVICE_ID_INTEL_HDA_OAKTRAIL 0x080a
#define PCI_DEVICE_ID_INTEL_MFD_SD 0x0820
#define PCI_DEVICE_ID_INTEL_MFD_SDIO1 0x0821
#define PCI_DEVICE_ID_INTEL_MFD_SDIO2 0x0822
@@ -2672,15 +2679,19 @@
#define PCI_DEVICE_ID_INTEL_QUARK_X1000_ILB 0x095e
#define PCI_DEVICE_ID_INTEL_I960 0x0960
#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_0 0x0a0c
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_2 0x0c0c
#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
+#define PCI_DEVICE_ID_INTEL_HDA_HSW_3 0x0d0c
+#define PCI_DEVICE_ID_INTEL_HDA_BYT 0x0f04
+#define PCI_DEVICE_ID_INTEL_SST_BYT 0x0f28
#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108f
#define PCI_DEVICE_ID_INTEL_82815_MC 0x1130
#define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132
+#define PCI_DEVICE_ID_INTEL_SST_TNG 0x119a
#define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221
-#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
-#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82437 0x122d
#define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e
#define PCI_DEVICE_ID_INTEL_82371FB_1 0x1230
@@ -2706,20 +2717,26 @@
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE 0x1576
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_NHI 0x1577
#define PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_BRIDGE 0x1578
+#define PCI_DEVICE_ID_INTEL_HDA_BDW 0x160c
#define PCI_DEVICE_ID_INTEL_80960_RP 0x1960
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX 0x19e2
#define PCI_DEVICE_ID_INTEL_QAT_C3XXX_VF 0x19e3
#define PCI_DEVICE_ID_INTEL_82840_HB 0x1a21
#define PCI_DEVICE_ID_INTEL_82845_HB 0x1a30
#define PCI_DEVICE_ID_INTEL_IOAT 0x1a38
+#define PCI_DEVICE_ID_INTEL_HDA_CPT 0x1c20
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN 0x1c41
#define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX 0x1c5f
+#define PCI_DEVICE_ID_INTEL_HDA_PBG 0x1d20
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_0 0x1d40
#define PCI_DEVICE_ID_INTEL_PATSBURG_LPC_1 0x1d41
+#define PCI_DEVICE_ID_INTEL_HDA_PPT 0x1e20
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MIN 0x1e40
#define PCI_DEVICE_ID_INTEL_PANTHERPOINT_LPC_MAX 0x1e5f
#define PCI_DEVICE_ID_INTEL_VMD_201D 0x201d
+#define PCI_DEVICE_ID_INTEL_HDA_BSW 0x2284
+#define PCI_DEVICE_ID_INTEL_SST_BSW 0x22a8
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MIN 0x2310
#define PCI_DEVICE_ID_INTEL_DH89XXCC_LPC_MAX 0x231f
#define PCI_DEVICE_ID_INTEL_82801AA_0 0x2410
@@ -2774,6 +2791,8 @@
#define PCI_DEVICE_ID_INTEL_82850_HB 0x2530
#define PCI_DEVICE_ID_INTEL_82860_HB 0x2531
#define PCI_DEVICE_ID_INTEL_E7501_MCH 0x254c
+#define PCI_DEVICE_ID_INTEL_7505_0 0x2550
+#define PCI_DEVICE_ID_INTEL_7205_0 0x255d
#define PCI_DEVICE_ID_INTEL_82845G_HB 0x2560
#define PCI_DEVICE_ID_INTEL_82845G_IG 0x2562
#define PCI_DEVICE_ID_INTEL_82865_HB 0x2570
@@ -2795,12 +2814,14 @@
#define PCI_DEVICE_ID_INTEL_ICH6_0 0x2640
#define PCI_DEVICE_ID_INTEL_ICH6_1 0x2641
#define PCI_DEVICE_ID_INTEL_ICH6_2 0x2642
+#define PCI_DEVICE_ID_INTEL_HDA_ICH6 0x2668
#define PCI_DEVICE_ID_INTEL_ICH6_16 0x266a
#define PCI_DEVICE_ID_INTEL_ICH6_17 0x266d
#define PCI_DEVICE_ID_INTEL_ICH6_18 0x266e
#define PCI_DEVICE_ID_INTEL_ICH6_19 0x266f
#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
#define PCI_DEVICE_ID_INTEL_ESB2_14 0x2698
+#define PCI_DEVICE_ID_INTEL_HDA_ESB2 0x269a
#define PCI_DEVICE_ID_INTEL_ESB2_17 0x269b
#define PCI_DEVICE_ID_INTEL_ESB2_18 0x269e
#define PCI_DEVICE_ID_INTEL_82945G_HB 0x2770
@@ -2808,11 +2829,12 @@
#define PCI_DEVICE_ID_INTEL_3000_HB 0x2778
#define PCI_DEVICE_ID_INTEL_82945GM_HB 0x27a0
#define PCI_DEVICE_ID_INTEL_82945GM_IG 0x27a2
+#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
-#define PCI_DEVICE_ID_INTEL_ICH7_30 0x27b0
#define PCI_DEVICE_ID_INTEL_TGP_LPC 0x27bc
#define PCI_DEVICE_ID_INTEL_ICH7_31 0x27bd
+#define PCI_DEVICE_ID_INTEL_HDA_ICH7 0x27d8
#define PCI_DEVICE_ID_INTEL_ICH7_17 0x27da
#define PCI_DEVICE_ID_INTEL_ICH7_19 0x27dd
#define PCI_DEVICE_ID_INTEL_ICH7_20 0x27de
@@ -2823,17 +2845,20 @@
#define PCI_DEVICE_ID_INTEL_ICH8_3 0x2814
#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
#define PCI_DEVICE_ID_INTEL_ICH8_5 0x283e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH8 0x284b
#define PCI_DEVICE_ID_INTEL_ICH8_6 0x2850
#define PCI_DEVICE_ID_INTEL_VMD_28C0 0x28c0
#define PCI_DEVICE_ID_INTEL_ICH9_0 0x2910
-#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_2 0x2912
#define PCI_DEVICE_ID_INTEL_ICH9_3 0x2913
#define PCI_DEVICE_ID_INTEL_ICH9_4 0x2914
-#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
-#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
#define PCI_DEVICE_ID_INTEL_ICH9_7 0x2916
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
#define PCI_DEVICE_ID_INTEL_ICH9_8 0x2918
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#define PCI_DEVICE_ID_INTEL_ICH9_6 0x2930
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_0 0x293e
+#define PCI_DEVICE_ID_INTEL_HDA_ICH9_1 0x293f
#define PCI_DEVICE_ID_INTEL_I7_MCR 0x2c18
#define PCI_DEVICE_ID_INTEL_I7_MC_TAD 0x2c19
#define PCI_DEVICE_ID_INTEL_I7_MC_RAS 0x2c1a
@@ -2850,8 +2875,8 @@
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_ADDR 0x2c31
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_RANK 0x2c32
#define PCI_DEVICE_ID_INTEL_I7_MC_CH2_TC 0x2c33
-#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT 0x2c40
+#define PCI_DEVICE_ID_INTEL_I7_NONCORE 0x2c41
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE 0x2c50
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT 0x2c51
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_REV2 0x2c70
@@ -2885,6 +2910,7 @@
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_ADDR_REV2 0x2db1
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_RANK_REV2 0x2db2
#define PCI_DEVICE_ID_INTEL_LYNNFIELD_MC_CH2_TC_REV2 0x2db3
+#define PCI_DEVICE_ID_INTEL_HDA_GML 0x3198
#define PCI_DEVICE_ID_INTEL_82855PM_HB 0x3340
#define PCI_DEVICE_ID_INTEL_IOAT_TBG4 0x3429
#define PCI_DEVICE_ID_INTEL_IOAT_TBG5 0x342a
@@ -2895,12 +2921,13 @@
#define PCI_DEVICE_ID_INTEL_IOAT_TBG1 0x3431
#define PCI_DEVICE_ID_INTEL_IOAT_TBG2 0x3432
#define PCI_DEVICE_ID_INTEL_IOAT_TBG3 0x3433
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_LP 0x34c8
#define PCI_DEVICE_ID_INTEL_82830_HB 0x3575
#define PCI_DEVICE_ID_INTEL_82830_CGC 0x3577
-#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
-#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_82855GM_HB 0x3580
#define PCI_DEVICE_ID_INTEL_82855GM_IG 0x3582
+#define PCI_DEVICE_ID_INTEL_82854_HB 0x358c
+#define PCI_DEVICE_ID_INTEL_82854_IG 0x358e
#define PCI_DEVICE_ID_INTEL_E7520_MCH 0x3590
#define PCI_DEVICE_ID_INTEL_E7320_MCH 0x3592
#define PCI_DEVICE_ID_INTEL_MCH_PA 0x3595
@@ -2910,11 +2937,11 @@
#define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599
#define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a
#define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e
+#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
+#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f
#define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610
-#define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b
-#define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c
#define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710
#define PCI_DEVICE_ID_INTEL_IOAT_JSF1 0x3711
#define PCI_DEVICE_ID_INTEL_IOAT_JSF2 0x3712
@@ -2927,14 +2954,19 @@
#define PCI_DEVICE_ID_INTEL_IOAT_JSF9 0x3719
#define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8
#define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_N 0x38c8
#define PCI_DEVICE_ID_INTEL_ICH10_0 0x3a14
#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
#define PCI_DEVICE_ID_INTEL_ICH10_2 0x3a18
#define PCI_DEVICE_ID_INTEL_ICH10_3 0x3a1a
#define PCI_DEVICE_ID_INTEL_ICH10_4 0x3a30
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_0 0x3a3e
#define PCI_DEVICE_ID_INTEL_ICH10_5 0x3a60
+#define PCI_DEVICE_ID_INTEL_HDA_ICH10_1 0x3a6e
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MIN 0x3b00
#define PCI_DEVICE_ID_INTEL_5_3400_SERIES_LPC_MAX 0x3b1f
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_0 0x3b56
+#define PCI_DEVICE_ID_INTEL_HDA_5_3400_SERIES_1 0x3b57
#define PCI_DEVICE_ID_INTEL_IOAT_SNB0 0x3c20
#define PCI_DEVICE_ID_INTEL_IOAT_SNB1 0x3c21
#define PCI_DEVICE_ID_INTEL_IOAT_SNB2 0x3c22
@@ -2945,16 +2977,12 @@
#define PCI_DEVICE_ID_INTEL_IOAT_SNB7 0x3c27
#define PCI_DEVICE_ID_INTEL_IOAT_SNB8 0x3c2e
#define PCI_DEVICE_ID_INTEL_IOAT_SNB9 0x3c2f
-#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
-#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
-#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
-#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
-#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_UNC_QPI0 0x3c41
#define PCI_DEVICE_ID_INTEL_UNC_QPI1 0x3c42
#define PCI_DEVICE_ID_INTEL_UNC_R2PCIE 0x3c43
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI0 0x3c44
#define PCI_DEVICE_ID_INTEL_UNC_R3QPI1 0x3c45
+#define PCI_DEVICE_ID_INTEL_UNC_HA 0x3c46
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_RAS 0x3c71 /* 15.1 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR0 0x3c72 /* 16.2 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_ERR1 0x3c73 /* 16.3 */
@@ -2966,17 +2994,40 @@
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD1 0x3cab /* 15.3 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD2 0x3cac /* 15.4 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_TAD3 0x3cad /* 15.5 */
+#define PCI_DEVICE_ID_INTEL_UNC_IMC0 0x3cb0
+#define PCI_DEVICE_ID_INTEL_UNC_IMC1 0x3cb1
+#define PCI_DEVICE_ID_INTEL_UNC_IMC2 0x3cb4
+#define PCI_DEVICE_ID_INTEL_UNC_IMC3 0x3cb5
#define PCI_DEVICE_ID_INTEL_SBRIDGE_IMC_DDRIO 0x3cb8 /* 17.0 */
#define PCI_DEVICE_ID_INTEL_JAKETOWN_UBOX 0x3ce0
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD0 0x3cf4 /* 12.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_BR 0x3cf5 /* 13.6 */
#define PCI_DEVICE_ID_INTEL_SBRIDGE_SAD1 0x3cf6 /* 12.7 */
+#define PCI_DEVICE_ID_INTEL_HDA_ICL_H 0x3dc8
#define PCI_DEVICE_ID_INTEL_IOAT_SNB 0x402f
#define PCI_DEVICE_ID_INTEL_5400_ERR 0x4030
#define PCI_DEVICE_ID_INTEL_5400_FBD0 0x4035
#define PCI_DEVICE_ID_INTEL_5400_FBD1 0x4036
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_H 0x43c8
+#define PCI_DEVICE_ID_INTEL_HDA_DG1 0x490d
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_0 0x4b55
+#define PCI_DEVICE_ID_INTEL_HDA_EHL_3 0x4b58
+#define PCI_DEVICE_ID_INTEL_HDA_JSL_N 0x4dc8
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_0 0x4f90
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_1 0x4f91
+#define PCI_DEVICE_ID_INTEL_HDA_DG2_2 0x4f92
#define PCI_DEVICE_ID_INTEL_EP80579_0 0x5031
#define PCI_DEVICE_ID_INTEL_EP80579_1 0x5032
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_P 0x51c8
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PS 0x51c9
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_0 0x51ca
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_P_1 0x51cb
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_M 0x51cc
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_PX 0x51cd
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_M 0x51ce
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_PX 0x51cf
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_N 0x54c8
+#define PCI_DEVICE_ID_INTEL_HDA_APL 0x5a98
#define PCI_DEVICE_ID_INTEL_5100_16 0x65f0
#define PCI_DEVICE_ID_INTEL_5100_19 0x65f3
#define PCI_DEVICE_ID_INTEL_5100_21 0x65f5
@@ -3010,8 +3061,13 @@
#define PCI_DEVICE_ID_INTEL_82443GX_0 0x71a0
#define PCI_DEVICE_ID_INTEL_82443GX_2 0x71a2
#define PCI_DEVICE_ID_INTEL_82372FB_1 0x7601
+#define PCI_DEVICE_ID_INTEL_HDA_RPL_S 0x7a50
+#define PCI_DEVICE_ID_INTEL_HDA_ADL_S 0x7ad0
+#define PCI_DEVICE_ID_INTEL_HDA_MTL 0x7e28
+#define PCI_DEVICE_ID_INTEL_HDA_ARL_S 0x7f50
#define PCI_DEVICE_ID_INTEL_SCH_LPC 0x8119
#define PCI_DEVICE_ID_INTEL_SCH_IDE 0x811a
+#define PCI_DEVICE_ID_INTEL_HDA_POULSBO 0x811b
#define PCI_DEVICE_ID_INTEL_E6XX_CU 0x8183
#define PCI_DEVICE_ID_INTEL_ITC_LPC 0x8186
#define PCI_DEVICE_ID_INTEL_82454GX 0x84c4
@@ -3020,9 +3076,31 @@
#define PCI_DEVICE_ID_INTEL_82454NX 0x84cb
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
+#define PCI_DEVICE_ID_INTEL_HDA_LPT 0x8c20
+#define PCI_DEVICE_ID_INTEL_HDA_9_SERIES 0x8ca0
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_0 0x8d20
+#define PCI_DEVICE_ID_INTEL_HDA_WBG_1 0x8d21
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_HDA_LKF 0x98c8
#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_0 0x9c20
+#define PCI_DEVICE_ID_INTEL_HDA_LPT_LP_1 0x9c21
+#define PCI_DEVICE_ID_INTEL_HDA_WPT_LP 0x9ca0
+#define PCI_DEVICE_ID_INTEL_HDA_SKL_LP 0x9d70
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_LP 0x9d71
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_LP 0x9dc8
+#define PCI_DEVICE_ID_INTEL_HDA_TGL_LP 0xa0c8
+#define PCI_DEVICE_ID_INTEL_HDA_SKL 0xa170
+#define PCI_DEVICE_ID_INTEL_HDA_KBL 0xa171
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_0 0xa1f0
+#define PCI_DEVICE_ID_INTEL_HDA_LBG_1 0xa270
+#define PCI_DEVICE_ID_INTEL_HDA_KBL_H 0xa2f0
+#define PCI_DEVICE_ID_INTEL_HDA_CNL_H 0xa348
+#define PCI_DEVICE_ID_INTEL_HDA_CML_S 0xa3f0
+#define PCI_DEVICE_ID_INTEL_HDA_LNL_P 0xa828
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
+#define PCI_DEVICE_ID_INTEL_HDA_CML_R 0xf0c8
+#define PCI_DEVICE_ID_INTEL_HDA_RKL_S 0xf1c8
#define PCI_VENDOR_ID_WANGXUN 0x8088
diff --git a/include/linux/pds/pds_adminq.h b/include/linux/pds/pds_adminq.h
index bcba7fda3cc9..4b4e9a98b37b 100644
--- a/include/linux/pds/pds_adminq.h
+++ b/include/linux/pds/pds_adminq.h
@@ -818,6 +818,367 @@ struct pds_vdpa_set_features_cmd {
__le64 features;
};
+#define PDS_LM_DEVICE_STATE_LENGTH 65536
+#define PDS_LM_CHECK_DEVICE_STATE_LENGTH(X) \
+ PDS_CORE_SIZE_CHECK(union, PDS_LM_DEVICE_STATE_LENGTH, X)
+
+/*
+ * enum pds_lm_cmd_opcode - Live Migration Device commands
+ */
+enum pds_lm_cmd_opcode {
+ PDS_LM_CMD_HOST_VF_STATUS = 1,
+
+ /* Device state commands */
+ PDS_LM_CMD_STATE_SIZE = 16,
+ PDS_LM_CMD_SUSPEND = 18,
+ PDS_LM_CMD_SUSPEND_STATUS = 19,
+ PDS_LM_CMD_RESUME = 20,
+ PDS_LM_CMD_SAVE = 21,
+ PDS_LM_CMD_RESTORE = 22,
+
+ /* Dirty page tracking commands */
+ PDS_LM_CMD_DIRTY_STATUS = 32,
+ PDS_LM_CMD_DIRTY_ENABLE = 33,
+ PDS_LM_CMD_DIRTY_DISABLE = 34,
+ PDS_LM_CMD_DIRTY_READ_SEQ = 35,
+ PDS_LM_CMD_DIRTY_WRITE_ACK = 36,
+};
+
+/**
+ * struct pds_lm_cmd - generic command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Structure padding to 60 Bytes
+ */
+struct pds_lm_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[56];
+};
+
+/**
+ * struct pds_lm_state_size_cmd - STATE_SIZE command
+ * @opcode: Opcode
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ */
+struct pds_lm_state_size_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_state_size_comp - STATE_SIZE command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @size: Size of the device state
+ * @rsvd2: Word boundary padding
+ * @color: Color bit
+ */
+struct pds_lm_state_size_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ union {
+ __le64 size;
+ u8 rsvd2[11];
+ } __packed;
+ u8 color;
+};
+
+enum pds_lm_suspend_resume_type {
+ PDS_LM_SUSPEND_RESUME_TYPE_FULL = 0,
+ PDS_LM_SUSPEND_RESUME_TYPE_P2P = 1,
+};
+
+/**
+ * struct pds_lm_suspend_cmd - SUSPEND command
+ * @opcode: Opcode PDS_LM_CMD_SUSPEND
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_suspend_status_cmd - SUSPEND status command
+ * @opcode: Opcode PDS_AQ_CMD_LM_SUSPEND_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of suspend (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_suspend_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_resume_cmd - RESUME command
+ * @opcode: Opcode PDS_LM_CMD_RESUME
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @type: Type of resume (enum pds_lm_suspend_resume_type)
+ */
+struct pds_lm_resume_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 type;
+};
+
+/**
+ * struct pds_lm_sg_elem - Transmit scatter-gather (SG) descriptor element
+ * @addr: DMA address of SG element data buffer
+ * @len: Length of SG element data buffer, in bytes
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_sg_elem {
+ __le64 addr;
+ __le32 len;
+ __le16 rsvd[2];
+};
+
+/**
+ * struct pds_lm_save_cmd - SAVE command
+ * @opcode: Opcode PDS_LM_CMD_SAVE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_save_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
+/**
+ * struct pds_lm_restore_cmd - RESTORE command
+ * @opcode: Opcode PDS_LM_CMD_RESTORE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: IOVA address of the SGL to dma the device state
+ * @num_sge: Total number of SG elements
+ */
+struct pds_lm_restore_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 rsvd2[4];
+ __le64 sgl_addr;
+ __le32 num_sge;
+} __packed;
+
+/**
+ * union pds_lm_dev_state - device state information
+ * @words: Device state words
+ */
+union pds_lm_dev_state {
+ __le32 words[PDS_LM_DEVICE_STATE_LENGTH / sizeof(__le32)];
+};
+
+enum pds_lm_host_vf_status {
+ PDS_LM_STA_NONE = 0,
+ PDS_LM_STA_IN_PROGRESS,
+ PDS_LM_STA_MAX,
+};
+
+/**
+ * struct pds_lm_dirty_region_info - Memory region info for STATUS and ENABLE
+ * @dma_base: Base address of the DMA-contiguous memory region
+ * @page_count: Number of pages in the memory region
+ * @page_size_log2: Log2 page size in the memory region
+ * @rsvd: Word boundary padding
+ */
+struct pds_lm_dirty_region_info {
+ __le64 dma_base;
+ __le32 page_count;
+ u8 page_size_log2;
+ u8 rsvd[3];
+};
+
+/**
+ * struct pds_lm_dirty_status_cmd - DIRTY_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @max_regions: Capacity of the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The minimum of max_regions (from the command) and num_regions (from the
+ * completion) of struct pds_lm_dirty_region_info will be written to
+ * regions_dma.
+ *
+ * The max_regions may be zero, in which case regions_dma is ignored. In that
+ * case, the completion will only report the maximum number of regions
+ * supported by the device, and the number of regions currently enabled.
+ */
+struct pds_lm_dirty_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 max_regions;
+ u8 rsvd2[3];
+ __le64 regions_dma;
+} __packed;
+
+/**
+ * enum pds_lm_dirty_bmp_type - Type of dirty page bitmap
+ * @PDS_LM_DIRTY_BMP_TYPE_NONE: No bitmap / disabled
+ * @PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK: Seq/Ack bitmap representation
+ */
+enum pds_lm_dirty_bmp_type {
+ PDS_LM_DIRTY_BMP_TYPE_NONE = 0,
+ PDS_LM_DIRTY_BMP_TYPE_SEQ_ACK = 1,
+};
+
+/**
+ * struct pds_lm_dirty_status_comp - STATUS command completion
+ * @status: Status of the command (enum pds_core_status_code)
+ * @rsvd: Word boundary padding
+ * @comp_index: Index in the desc ring for which this is the completion
+ * @max_regions: Maximum number of regions supported by the device
+ * @num_regions: Number of regions currently enabled
+ * @bmp_type: Type of dirty bitmap representation
+ * @rsvd2: Word boundary padding
+ * @bmp_type_mask: Mask of supported bitmap types, bit index per type
+ * @rsvd3: Word boundary padding
+ * @color: Color bit
+ *
+ * This completion descriptor is used for STATUS, ENABLE, and DISABLE.
+ */
+struct pds_lm_dirty_status_comp {
+ u8 status;
+ u8 rsvd;
+ __le16 comp_index;
+ u8 max_regions;
+ u8 num_regions;
+ u8 bmp_type;
+ u8 rsvd2;
+ __le32 bmp_type_mask;
+ u8 rsvd3[3];
+ u8 color;
+};
+
+/**
+ * struct pds_lm_dirty_enable_cmd - DIRTY_ENABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_ENABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @bmp_type: Type of dirty bitmap representation
+ * @num_regions: Number of entries in the region info buffer
+ * @rsvd2: Word boundary padding
+ * @regions_dma: DMA address of the region info buffer
+ *
+ * The num_regions must be nonzero, and less than or equal to the maximum
+ * number of regions supported by the device.
+ *
+ * The memory regions should not overlap.
+ *
+ * The information should be initialized by the driver. The device may modify
+ * the information on successful completion, such as by size-aligning the
+ * number of pages in a region.
+ *
+ * The modified number of pages will be greater than or equal to the page count
+ * given in the enable command, and at least as coarsely aligned as the given
+ * value. For example, the count might be aligned to a multiple of 64, but
+ * if the value is already a multiple of 128 or higher, it will not change.
+ * If the driver requires its own minimum alignment of the number of pages, the
+ * driver should account for that already in the region info of this command.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion.
+ */
+struct pds_lm_dirty_enable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 bmp_type;
+ u8 num_regions;
+ u8 rsvd2[2];
+ __le64 regions_dma;
+} __packed;
+
+/**
+ * struct pds_lm_dirty_disable_cmd - DIRTY_DISABLE command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_DISABLE
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ *
+ * Dirty page tracking will be disabled. This may be called in any state, as
+ * long as dirty page tracking is supported by the device, to ensure that dirty
+ * page tracking is disabled.
+ *
+ * This command uses struct pds_lm_dirty_status_comp for its completion. On
+ * success, num_regions will be zero.
+ */
+struct pds_lm_dirty_disable_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+};
+
+/**
+ * struct pds_lm_dirty_seq_ack_cmd - DIRTY_READ_SEQ or _WRITE_ACK command
+ * @opcode: Opcode PDS_LM_CMD_DIRTY_[READ_SEQ|WRITE_ACK]
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @off_bytes: Byte offset in the bitmap
+ * @len_bytes: Number of bytes to transfer
+ * @num_sge: Number of DMA scatter gather elements
+ * @rsvd2: Word boundary padding
+ * @sgl_addr: DMA address of scatter gather list
+ *
+ * Read bytes from the SEQ bitmap, or write bytes into the ACK bitmap.
+ *
+ * This command treats the entire bitmap as a byte buffer. It does not
+ * distinguish between guest memory regions. The driver should refer to the
+ * number of pages in each region, according to PDS_LM_CMD_DIRTY_STATUS, to
+ * determine the region boundaries in the bitmap. Each region will be
+ * represented by exactly the number of bits as the page count for that region,
+ * immediately following the last bit of the previous region.
+ */
+struct pds_lm_dirty_seq_ack_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ __le32 off_bytes;
+ __le32 len_bytes;
+ __le16 num_sge;
+ u8 rsvd2[2];
+ __le64 sgl_addr;
+} __packed;
+
+/**
+ * struct pds_lm_host_vf_status_cmd - HOST_VF_STATUS command
+ * @opcode: Opcode PDS_LM_CMD_HOST_VF_STATUS
+ * @rsvd: Word boundary padding
+ * @vf_id: VF id
+ * @status: Current LM status of host VF driver (enum pds_lm_host_status)
+ */
+struct pds_lm_host_vf_status_cmd {
+ u8 opcode;
+ u8 rsvd;
+ __le16 vf_id;
+ u8 status;
+};
+
union pds_core_adminq_cmd {
u8 opcode;
u8 bytes[64];
@@ -844,6 +1205,17 @@ union pds_core_adminq_cmd {
struct pds_vdpa_vq_init_cmd vdpa_vq_init;
struct pds_vdpa_vq_reset_cmd vdpa_vq_reset;
+ struct pds_lm_suspend_cmd lm_suspend;
+ struct pds_lm_suspend_status_cmd lm_suspend_status;
+ struct pds_lm_resume_cmd lm_resume;
+ struct pds_lm_state_size_cmd lm_state_size;
+ struct pds_lm_save_cmd lm_save;
+ struct pds_lm_restore_cmd lm_restore;
+ struct pds_lm_host_vf_status_cmd lm_host_vf_status;
+ struct pds_lm_dirty_status_cmd lm_dirty_status;
+ struct pds_lm_dirty_enable_cmd lm_dirty_enable;
+ struct pds_lm_dirty_disable_cmd lm_dirty_disable;
+ struct pds_lm_dirty_seq_ack_cmd lm_dirty_seq_ack;
};
union pds_core_adminq_comp {
@@ -868,6 +1240,9 @@ union pds_core_adminq_comp {
struct pds_vdpa_vq_init_comp vdpa_vq_init;
struct pds_vdpa_vq_reset_comp vdpa_vq_reset;
+
+ struct pds_lm_state_size_comp lm_state_size;
+ struct pds_lm_dirty_status_comp lm_dirty_status;
};
#ifndef __CHECKER__
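
For PDS_LM_CMD_DIRTY_READ_SEQ/WRITE_ACK, the kernel-doc above describes a single packed bitmap: each region contributes exactly page_count bits, immediately following the previous region's bits. A small sketch of the resulting offset arithmetic; plain helper code, not part of the driver:

#include <linux/types.h>

struct example_region {
	u32 page_count;		/* pages tracked in this region */
};

/* Bit offset of region @idx inside the combined SEQ/ACK bitmap. */
static u64 example_region_bit_offset(const struct example_region *regions,
				     unsigned int idx)
{
	u64 off = 0;
	unsigned int i;

	for (i = 0; i < idx; i++)
		off += regions[i].page_count;

	return off;
}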
diff --git a/include/linux/pds/pds_common.h b/include/linux/pds/pds_common.h
index 435c8e8161c2..30581e2e04cc 100644
--- a/include/linux/pds/pds_common.h
+++ b/include/linux/pds/pds_common.h
@@ -34,16 +34,19 @@ enum pds_core_vif_types {
#define PDS_DEV_TYPE_CORE_STR "Core"
#define PDS_DEV_TYPE_VDPA_STR "vDPA"
-#define PDS_DEV_TYPE_VFIO_STR "VFio"
+#define PDS_DEV_TYPE_VFIO_STR "vfio"
#define PDS_DEV_TYPE_ETH_STR "Eth"
#define PDS_DEV_TYPE_RDMA_STR "RDMA"
#define PDS_DEV_TYPE_LM_STR "LM"
#define PDS_VDPA_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_VDPA_STR
+#define PDS_VFIO_LM_DEV_NAME PDS_CORE_DRV_NAME "." PDS_DEV_TYPE_LM_STR "." PDS_DEV_TYPE_VFIO_STR
+
+struct pdsc;
int pdsc_register_notify(struct notifier_block *nb);
void pdsc_unregister_notify(struct notifier_block *nb);
void *pdsc_get_pf_struct(struct pci_dev *vf_pdev);
-int pds_client_register(struct pci_dev *pf_pdev, char *devname);
-int pds_client_unregister(struct pci_dev *pf_pdev, u16 client_id);
+int pds_client_register(struct pdsc *pf, char *devname);
+int pds_client_unregister(struct pdsc *pf, u16 client_id);
#endif /* _PDS_COMMON_H_ */
diff --git a/include/linux/peci.h b/include/linux/peci.h
index 06e6ef935297..9b3d36aff431 100644
--- a/include/linux/peci.h
+++ b/include/linux/peci.h
@@ -42,13 +42,13 @@ struct peci_controller_ops {
*/
struct peci_controller {
struct device dev;
- struct peci_controller_ops *ops;
+ const struct peci_controller_ops *ops;
struct mutex bus_lock; /* held for the duration of xfer */
u8 id;
};
struct peci_controller *devm_peci_controller_add(struct device *parent,
- struct peci_controller_ops *ops);
+ const struct peci_controller_ops *ops);
static inline struct peci_controller *to_peci_controller(void *d)
{
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index b3b458442330..68fac2e7cbe6 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -35,6 +35,12 @@
#define PCPU_BITMAP_BLOCK_BITS (PCPU_BITMAP_BLOCK_SIZE >> \
PCPU_MIN_ALLOC_SHIFT)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define PERCPU_DYNAMIC_SIZE_SHIFT 12
+#else
+#define PERCPU_DYNAMIC_SIZE_SHIFT 10
+#endif
+
/*
* Percpu allocator can serve percpu allocations before slab is
* initialized which allows slab to depend on the percpu allocator.
@@ -42,7 +48,7 @@
* for this. Keep PERCPU_DYNAMIC_RESERVE equal to or larger than
* PERCPU_DYNAMIC_EARLY_SIZE.
*/
-#define PERCPU_DYNAMIC_EARLY_SIZE (20 << 10)
+#define PERCPU_DYNAMIC_EARLY_SIZE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
/*
* PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
@@ -56,9 +62,9 @@
* intelligent way to determine this would be nice.
*/
#if BITS_PER_LONG > 32
-#define PERCPU_DYNAMIC_RESERVE (28 << 10)
+#define PERCPU_DYNAMIC_RESERVE (28 << PERCPU_DYNAMIC_SIZE_SHIFT)
#else
-#define PERCPU_DYNAMIC_RESERVE (20 << 10)
+#define PERCPU_DYNAMIC_RESERVE (20 << PERCPU_DYNAMIC_SIZE_SHIFT)
#endif
extern void *pcpu_base_addr;
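For scale (not part of the patch): with CONFIG_RANDOM_KMALLOC_CACHES the shift above grows from 10 to 12, so PERCPU_DYNAMIC_EARLY_SIZE goes from 20 << 10 = 20 KiB to 20 << 12 = 80 KiB, and the 64-bit PERCPU_DYNAMIC_RESERVE from 28 KiB to 112 KiB.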
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index 75b73c83bc9d..d01351b1526f 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -30,17 +30,28 @@ struct percpu_counter {
extern int percpu_counter_batch;
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
- struct lock_class_key *key);
+int __percpu_counter_init_many(struct percpu_counter *fbc, s64 amount,
+ gfp_t gfp, u32 nr_counters,
+ struct lock_class_key *key);
-#define percpu_counter_init(fbc, value, gfp) \
+#define percpu_counter_init_many(fbc, value, gfp, nr_counters) \
({ \
static struct lock_class_key __key; \
\
- __percpu_counter_init(fbc, value, gfp, &__key); \
+ __percpu_counter_init_many(fbc, value, gfp, nr_counters,\
+ &__key); \
})
-void percpu_counter_destroy(struct percpu_counter *fbc);
+
+#define percpu_counter_init(fbc, value, gfp) \
+ percpu_counter_init_many(fbc, value, gfp, 1)
+
+void percpu_counter_destroy_many(struct percpu_counter *fbc, u32 nr_counters);
+static inline void percpu_counter_destroy(struct percpu_counter *fbc)
+{
+ percpu_counter_destroy_many(fbc, 1);
+}
+
void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
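A hedged usage sketch of the new *_many variants; the wrapping struct and the number of counters are illustrative, not taken from this patch:

struct example_stats {
	struct percpu_counter events[4];	/* must be a contiguous array */
};

static int example_stats_init(struct example_stats *s)
{
	/* one call initializes all four counters to 0 */
	return percpu_counter_init_many(s->events, 0, GFP_KERNEL,
					ARRAY_SIZE(s->events));
}

static void example_stats_exit(struct example_stats *s)
{
	percpu_counter_destroy_many(s->events, ARRAY_SIZE(s->events));
}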
@@ -116,11 +127,27 @@ struct percpu_counter {
s64 count;
};
+static inline int percpu_counter_init_many(struct percpu_counter *fbc,
+ s64 amount, gfp_t gfp,
+ u32 nr_counters)
+{
+ u32 i;
+
+ for (i = 0; i < nr_counters; i++)
+ fbc[i].count = amount;
+
+ return 0;
+}
+
static inline int percpu_counter_init(struct percpu_counter *fbc, s64 amount,
gfp_t gfp)
{
- fbc->count = amount;
- return 0;
+ return percpu_counter_init_many(fbc, amount, gfp, 1);
+}
+
+static inline void percpu_counter_destroy_many(struct percpu_counter *fbc,
+ u32 nr_counters)
+{
}
static inline void percpu_counter_destroy(struct percpu_counter *fbc)
diff --git a/include/linux/perf/riscv_pmu.h b/include/linux/perf/riscv_pmu.h
index 43fc892aa7d9..43282e22ebe1 100644
--- a/include/linux/perf/riscv_pmu.h
+++ b/include/linux/perf/riscv_pmu.h
@@ -6,8 +6,8 @@
*
*/
-#ifndef _ASM_RISCV_PERF_EVENT_H
-#define _ASM_RISCV_PERF_EVENT_H
+#ifndef _RISCV_PMU_H
+#define _RISCV_PMU_H
#include <linux/perf_event.h>
#include <linux/ptrace.h>
@@ -21,7 +21,7 @@
#define RISCV_MAX_COUNTERS 64
#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
-#define RISCV_PMU_PDEV_NAME "riscv-pmu"
+#define RISCV_PMU_SBI_PDEV_NAME "riscv-pmu-sbi"
#define RISCV_PMU_LEGACY_PDEV_NAME "riscv-pmu-legacy"
#define RISCV_PMU_STOP_FLAG_RESET 1
@@ -55,6 +55,10 @@ struct riscv_pmu {
void (*ctr_start)(struct perf_event *event, u64 init_val);
void (*ctr_stop)(struct perf_event *event, unsigned long flag);
int (*event_map)(struct perf_event *event, u64 *config);
+ void (*event_init)(struct perf_event *event);
+ void (*event_mapped)(struct perf_event *event, struct mm_struct *mm);
+ void (*event_unmapped)(struct perf_event *event, struct mm_struct *mm);
+ uint8_t (*csr_index)(struct perf_event *event);
struct cpu_hw_events __percpu *hw_events;
struct hlist_node node;
@@ -81,4 +85,4 @@ int riscv_pmu_get_hpm_info(u32 *hw_ctr_width, u32 *num_hw_ctr);
#endif /* CONFIG_RISCV_PMU */
-#endif /* _ASM_RISCV_PERF_EVENT_H */
+#endif /* _RISCV_PMU_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 05253af70ce9..e85cd1c0eaf3 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -444,7 +444,8 @@ struct pmu {
/*
* Will return the value for perf_event_mmap_page::index for this event,
- * if no implementation is provided it will default to: event->hw.idx + 1.
+ * if no implementation is provided it will default to 0 (see
+ * perf_event_idx_default).
*/
int (*event_idx) (struct perf_event *event); /*optional */
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 5063b482e34f..1fba072b3dac 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -5,6 +5,9 @@
#include <linux/pfn.h>
#include <asm/pgtable.h>
+#define PMD_ORDER (PMD_SHIFT - PAGE_SHIFT)
+#define PUD_ORDER (PUD_SHIFT - PAGE_SHIFT)
+
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
@@ -63,7 +66,6 @@ static inline unsigned long pte_index(unsigned long address)
{
return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}
-#define pte_index pte_index
#ifndef pmd_index
static inline unsigned long pmd_index(unsigned long address)
@@ -99,7 +101,7 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
((pte_t *)kmap_local_page(pmd_page(*(pmd))) + pte_index((address)))
#define pte_unmap(pte) do { \
kunmap_local((pte)); \
- /* rcu_read_unlock() to be added later */ \
+ rcu_read_unlock(); \
} while (0)
#else
static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
@@ -108,10 +110,12 @@ static inline pte_t *__pte_map(pmd_t *pmd, unsigned long address)
}
static inline void pte_unmap(pte_t *pte)
{
- /* rcu_read_unlock() to be added later */
+ rcu_read_unlock();
}
#endif
+void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable);
+
/* Find an entry in the second-level page table.. */
#ifndef pmd_offset
static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
@@ -180,6 +184,60 @@ static inline int pmd_young(pmd_t pmd)
}
#endif
+/*
+ * A facility to provide lazy MMU batching. This allows PTE updates and
+ * page invalidations to be delayed until a call to leave lazy MMU mode
+ * is issued. Some architectures may benefit from doing this, and it is
+ * beneficial for both shadow and direct mode hypervisors, which may batch
+ * the PTE updates which happen during this window. Note that using this
+ * interface requires that read hazards be removed from the code. A read
+ * hazard could result in the direct mode hypervisor case, since the actual
+ * write to the page tables may not yet have taken place, so reads though
+ * a raw PTE pointer after it has been modified are not guaranteed to be
+ * up to date. This mode can only be entered and left under the protection of
+ * the page table locks for all page tables which may be modified. In the UP
+ * case, this is required so that preemption is disabled, and in the SMP case,
+ * it must synchronize the delayed page table writes properly on other CPUs.
+ */
+#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
+#define arch_enter_lazy_mmu_mode() do {} while (0)
+#define arch_leave_lazy_mmu_mode() do {} while (0)
+#define arch_flush_lazy_mmu_mode() do {} while (0)
+#endif
+
+#ifndef set_ptes
+/**
+ * set_ptes - Map consecutive pages to a contiguous range of addresses.
+ * @mm: Address space to map the pages into.
+ * @addr: Address to map the first page at.
+ * @ptep: Page table pointer for the first entry.
+ * @pte: Page table entry for the first page.
+ * @nr: Number of pages to map.
+ *
+ * May be overridden by the architecture, or the architecture can define
+ * set_pte() and PFN_PTE_SHIFT.
+ *
+ * Context: The caller holds the page table lock. The pages all belong
+ * to the same folio. The PTEs are all in the same PMD.
+ */
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pte, nr);
+
+ arch_enter_lazy_mmu_mode();
+ for (;;) {
+ set_pte(ptep, pte);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+ }
+ arch_leave_lazy_mmu_mode();
+}
+#endif
+#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
+
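A hedged caller-side sketch of set_ptes(); the surrounding function and the folio/prot sources are illustrative, while mk_pte(), folio_page() and folio_nr_pages() are existing helpers:

static void example_map_folio(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, struct folio *folio, pgprot_t prot)
{
	/* caller holds the page table lock; all PTEs lie in one PMD */
	pte_t pte = mk_pte(folio_page(folio, 0), prot);

	set_ptes(mm, addr, ptep, pte, folio_nr_pages(folio));
}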
#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep,
@@ -313,6 +371,20 @@ static inline bool arch_has_hw_pte_young(void)
}
#endif
+#ifndef arch_check_zapped_pte
+static inline void arch_check_zapped_pte(struct vm_area_struct *vma,
+ pte_t pte)
+{
+}
+#endif
+
+#ifndef arch_check_zapped_pmd
+static inline void arch_check_zapped_pmd(struct vm_area_struct *vma,
+ pmd_t pmd)
+{
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long address,
@@ -320,7 +392,7 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
{
pte_t pte = ptep_get(ptep);
pte_clear(mm, address, ptep);
- page_table_check_pte_clear(mm, address, pte);
+ page_table_check_pte_clear(mm, pte);
return pte;
}
#endif
@@ -390,6 +462,7 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
return pmd;
}
#define pmdp_get_lockless pmdp_get_lockless
+#define pmdp_get_lockless_sync() tlb_remove_table_sync_one()
#endif /* CONFIG_PGTABLE_LEVELS > 2 */
#endif /* CONFIG_GUP_GET_PXX_LOW_HIGH */
@@ -408,6 +481,9 @@ static inline pmd_t pmdp_get_lockless(pmd_t *pmdp)
{
return pmdp_get(pmdp);
}
+static inline void pmdp_get_lockless_sync(void)
+{
+}
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -419,7 +495,7 @@ static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
pmd_t pmd = *pmdp;
pmd_clear(pmdp);
- page_table_check_pmd_clear(mm, address, pmd);
+ page_table_check_pmd_clear(mm, pmd);
return pmd;
}
@@ -432,7 +508,7 @@ static inline pud_t pudp_huge_get_and_clear(struct mm_struct *mm,
pud_t pud = *pudp;
pud_clear(pudp);
- page_table_check_pud_clear(mm, address, pud);
+ page_table_check_pud_clear(mm, pud);
return pud;
}
@@ -450,11 +526,11 @@ static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
#endif
#ifndef __HAVE_ARCH_PUDP_HUGE_GET_AND_CLEAR_FULL
-static inline pud_t pudp_huge_get_and_clear_full(struct mm_struct *mm,
+static inline pud_t pudp_huge_get_and_clear_full(struct vm_area_struct *vma,
unsigned long address, pud_t *pudp,
int full)
{
- return pudp_huge_get_and_clear(mm, address, pudp);
+ return pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
}
#endif
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
@@ -515,6 +591,20 @@ extern pud_t pudp_huge_clear_flush(struct vm_area_struct *vma,
pud_t *pudp);
#endif
+#ifndef pte_mkwrite
+static inline pte_t pte_mkwrite(pte_t pte, struct vm_area_struct *vma)
+{
+ return pte_mkwrite_novma(pte);
+}
+#endif
+
+#if defined(CONFIG_ARCH_WANT_PMD_MKWRITE) && !defined(pmd_mkwrite)
+static inline pmd_t pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
+{
+ return pmd_mkwrite_novma(pmd);
+}
+#endif
+
#ifndef __HAVE_ARCH_PTEP_SET_WRPROTECT
struct mm_struct;
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
@@ -558,6 +648,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
#endif
#ifndef __HAVE_ARCH_PUDP_SET_WRPROTECT
#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void pudp_set_wrprotect(struct mm_struct *mm,
unsigned long address, pud_t *pudp)
{
@@ -571,6 +662,7 @@ static inline void pudp_set_wrprotect(struct mm_struct *mm,
{
BUILD_BUG();
}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
#endif
@@ -693,11 +785,14 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
{
return pmd_val(pmd_a) == pmd_val(pmd_b);
}
+#endif
+#ifndef pud_same
static inline int pud_same(pud_t pud_a, pud_t pud_b)
{
return pud_val(pud_a) == pud_val(pud_b);
}
+#define pud_same pud_same
#endif
#ifndef __HAVE_ARCH_P4D_SAME
@@ -1041,27 +1136,6 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
#endif
/*
- * A facility to provide lazy MMU batching. This allows PTE updates and
- * page invalidations to be delayed until a call to leave lazy MMU mode
- * is issued. Some architectures may benefit from doing this, and it is
- * beneficial for both shadow and direct mode hypervisors, which may batch
- * the PTE updates which happen during this window. Note that using this
- * interface requires that read hazards be removed from the code. A read
- * hazard could result in the direct mode hypervisor case, since the actual
- * write to the page tables may not yet have taken place, so reads though
- * a raw PTE pointer after it has been modified are not guaranteed to be
- * up to date. This mode can only be entered and left under the protection of
- * the page table locks for all page tables which may be modified. In the UP
- * case, this is required so that preemption is disabled, and in the SMP case,
- * it must synchronize the delayed page table writes properly on other CPUs.
- */
-#ifndef __HAVE_ARCH_ENTER_LAZY_MMU_MODE
-#define arch_enter_lazy_mmu_mode() do {} while (0)
-#define arch_leave_lazy_mmu_mode() do {} while (0)
-#define arch_flush_lazy_mmu_mode() do {} while (0)
-#endif
-
-/*
* A facility to provide batching of the reload of page tables and
* other process state with the actual context switch code for
* paravirtualized guests. By convention, only one of the batched
@@ -1322,12 +1396,16 @@ static inline int pud_trans_unstable(pud_t *pud)
#ifndef CONFIG_NUMA_BALANCING
/*
- * Technically a PTE can be PROTNONE even when not doing NUMA balancing but
- * the only case the kernel cares is for NUMA balancing and is only ever set
- * when the VMA is accessible. For PROT_NONE VMAs, the PTEs are not marked
- * _PAGE_PROTNONE so by default, implement the helper as "always no". It
- * is the responsibility of the caller to distinguish between PROT_NONE
- * protections and NUMA hinting fault protections.
+ * In an inaccessible (PROT_NONE) VMA, pte_protnone() may indicate "yes". It is
+ * perfectly valid to indicate "no" in that case, which is why our default
+ * implementation defaults to "always no".
+ *
+ * In an accessible VMA, however, pte_protnone() reliably indicates PROT_NONE
+ * page protection due to NUMA hinting. NUMA hinting faults only apply in
+ * accessible VMAs.
+ *
+ * So, to reliably identify PROT_NONE PTEs that require a NUMA hinting fault,
+ * looking at the VMA accessibility is sufficient.
*/
static inline int pte_protnone(pte_t pte)
{
@@ -1499,6 +1577,9 @@ typedef unsigned int pgtbl_mod_mask;
#define has_transparent_hugepage() IS_BUILTIN(CONFIG_TRANSPARENT_HUGEPAGE)
#endif
+#ifndef has_transparent_pud_hugepage
+#define has_transparent_pud_hugepage() IS_BUILTIN(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+#endif
/*
* On some architectures it depends on the mm if the p4d/pud or pmd
* layer of the page table hierarchy is folded or not.
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 7d07f8736431..2b886ea654bb 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -600,7 +600,7 @@ void pcs_get_state(struct phylink_pcs *pcs,
*
* The %neg_mode argument should be tested via the phylink_mode_*() family of
* functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
int pcs_config(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, const unsigned long *advertising,
@@ -630,7 +630,7 @@ void pcs_an_restart(struct phylink_pcs *pcs);
*
* The %mode argument should be tested via the phylink_mode_*() family of
* functions, or for PCS that set pcs->neg_mode true, should be tested
- * against the %PHYLINK_PCS_NEG_* definitions.
+ * against the PHYLINK_PCS_NEG_* definitions.
*/
void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index c758809d5bcf..f9f9931e02d6 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -17,18 +17,10 @@
struct fs_pin;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
-/*
- * sysctl for vm.memfd_noexec
- * 0: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
- * acts like MFD_EXEC was set.
- * 1: memfd_create() without MFD_EXEC nor MFD_NOEXEC_SEAL
- * acts like MFD_NOEXEC_SEAL was set.
- * 2: memfd_create() without MFD_NOEXEC_SEAL will be
- * rejected.
- */
-#define MEMFD_NOEXEC_SCOPE_EXEC 0
-#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1
-#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2
+/* modes for vm.memfd_noexec sysctl */
+#define MEMFD_NOEXEC_SCOPE_EXEC 0 /* MFD_EXEC implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL 1 /* MFD_NOEXEC_SEAL implied if unset */
+#define MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED 2 /* same as 1, except MFD_EXEC rejected */
#endif
struct pid_namespace {
@@ -47,7 +39,6 @@ struct pid_namespace {
int reboot; /* group exit code if this pidns was rebooted */
struct ns_common ns;
#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
- /* sysctl for vm.memfd_noexec */
int memfd_noexec_scope;
#endif
} __randomize_layout;
@@ -64,6 +55,23 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_MEMFD_CREATE)
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ int scope = MEMFD_NOEXEC_SCOPE_EXEC;
+
+ for (; ns; ns = ns->parent)
+ scope = max(scope, READ_ONCE(ns->memfd_noexec_scope));
+
+ return scope;
+}
+#else
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+#endif
+
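A hedged sketch of how memfd_create() could apply the inherited scope; MFD_EXEC and MFD_NOEXEC_SEAL come from <uapi/linux/memfd.h>, and the function itself is illustrative:

static int example_memfd_apply_noexec(struct pid_namespace *ns,
				      unsigned int *flags)
{
	int scope = pidns_memfd_noexec_scope(ns);

	if (!(*flags & (MFD_EXEC | MFD_NOEXEC_SEAL))) {
		/* neither flag passed: the strictest ancestor sysctl decides */
		if (scope >= MEMFD_NOEXEC_SCOPE_NOEXEC_SEAL)
			*flags |= MFD_NOEXEC_SEAL;
		else
			*flags |= MFD_EXEC;
	}

	/* scope 2 rejects executable memfds outright */
	if ((*flags & MFD_EXEC) && scope >= MEMFD_NOEXEC_SCOPE_NOEXEC_ENFORCED)
		return -EACCES;

	return 0;
}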
extern struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns);
extern void zap_pid_ns_processes(struct pid_namespace *pid_ns);
@@ -78,6 +86,11 @@ static inline struct pid_namespace *get_pid_ns(struct pid_namespace *ns)
return ns;
}
+static inline int pidns_memfd_noexec_scope(struct pid_namespace *ns)
+{
+ return 0;
+}
+
static inline struct pid_namespace *copy_pid_ns(unsigned long flags,
struct user_namespace *user_ns, struct pid_namespace *ns)
{
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 54a06a4d2618..596ca4f95cfa 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -8,7 +8,7 @@
struct device;
struct bd6107_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 1a8b5b1946fe..323fbf5f7613 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -8,7 +8,7 @@
struct device;
struct gpio_backlight_platform_data {
- struct device *fbdev;
+ struct device *dev;
};
#endif
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
index c9da8d402750..95d85c1394bc 100644
--- a/include/linux/platform_data/lv5207lp.h
+++ b/include/linux/platform_data/lv5207lp.h
@@ -8,7 +8,7 @@
struct device;
struct lv5207lp_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int max_value;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/rtc-ds2404.h b/include/linux/platform_data/rtc-ds2404.h
deleted file mode 100644
index 22c53825528f..000000000000
--- a/include/linux/platform_data/rtc-ds2404.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- * ds2404.h - platform data structure for the DS2404 RTC.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 Sven Schnelle <svens@stackframe.org>
- */
-
-#ifndef __LINUX_DS2404_H
-#define __LINUX_DS2404_H
-
-struct ds2404_platform_data {
-
- unsigned int gpio_rst;
- unsigned int gpio_clk;
- unsigned int gpio_dq;
-};
-#endif
diff --git a/include/linux/platform_data/video-mx3fb.h b/include/linux/platform_data/video-mx3fb.h
deleted file mode 100644
index d03dc322a616..000000000000
--- a/include/linux/platform_data/video-mx3fb.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2008
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- */
-
-#ifndef __ASM_ARCH_MX3FB_H__
-#define __ASM_ARCH_MX3FB_H__
-
-#include <linux/device.h>
-#include <linux/fb.h>
-
-/* Proprietary FB_SYNC_ flags */
-#define FB_SYNC_OE_ACT_HIGH 0x80000000
-#define FB_SYNC_CLK_INVERT 0x40000000
-#define FB_SYNC_DATA_INVERT 0x20000000
-#define FB_SYNC_CLK_IDLE_EN 0x10000000
-#define FB_SYNC_SHARP_MODE 0x08000000
-#define FB_SYNC_SWAP_RGB 0x04000000
-#define FB_SYNC_CLK_SEL_EN 0x02000000
-
-/*
- * Specify the way your display is connected. The IPU can arbitrarily
- * map the internal colors to the external data lines. We only support
- * the following mappings at the moment.
- */
-enum disp_data_mapping {
- /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
- IPU_DISP_DATA_MAPPING_RGB666,
- /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
- IPU_DISP_DATA_MAPPING_RGB565,
- /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
- IPU_DISP_DATA_MAPPING_RGB888,
-};
-
-/**
- * struct mx3fb_platform_data - mx3fb platform data
- *
- * @dma_dev: pointer to the dma-device, used for dma-slave connection
- * @mode: pointer to a platform-provided per mxc_register_fb() videomode
- */
-struct mx3fb_platform_data {
- struct device *dma_dev;
- const char *name;
- const struct fb_videomode *mode;
- int num_modes;
- enum disp_data_mapping disp_data_fmt;
-};
-
-#endif
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 28234dc9fa6a..16e99a1c37fc 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -66,6 +66,7 @@
#define ASUS_WMI_DEVID_CAMERA 0x00060013
#define ASUS_WMI_DEVID_LID_FLIP 0x00060062
#define ASUS_WMI_DEVID_LID_FLIP_ROG 0x00060077
+#define ASUS_WMI_DEVID_MINI_LED_MODE 0x0005001E
/* Storage */
#define ASUS_WMI_DEVID_CARDREADER 0x00080013
@@ -80,8 +81,19 @@
#define ASUS_WMI_DEVID_FAN_CTRL 0x00110012 /* deprecated */
#define ASUS_WMI_DEVID_CPU_FAN_CTRL 0x00110013
#define ASUS_WMI_DEVID_GPU_FAN_CTRL 0x00110014
+#define ASUS_WMI_DEVID_MID_FAN_CTRL 0x00110031
#define ASUS_WMI_DEVID_CPU_FAN_CURVE 0x00110024
#define ASUS_WMI_DEVID_GPU_FAN_CURVE 0x00110025
+#define ASUS_WMI_DEVID_MID_FAN_CURVE 0x00110032
+
+/* Tunables for ASUS ROG laptops */
+#define ASUS_WMI_DEVID_PPT_PL2_SPPT 0x001200A0
+#define ASUS_WMI_DEVID_PPT_PL1_SPL 0x001200A3
+#define ASUS_WMI_DEVID_PPT_APU_SPPT 0x001200B0
+#define ASUS_WMI_DEVID_PPT_PLAT_SPPT 0x001200B1
+#define ASUS_WMI_DEVID_PPT_FPPT 0x001200C1
+#define ASUS_WMI_DEVID_NV_DYN_BOOST 0x001200C0
+#define ASUS_WMI_DEVID_NV_THERM_TARGET 0x001200C2
/* Power */
#define ASUS_WMI_DEVID_PROCESSOR_STATE 0x00120012
@@ -95,7 +107,12 @@
/* Keyboard dock */
#define ASUS_WMI_DEVID_KBD_DOCK 0x00120063
-/* dgpu on/off */
+/* Charging mode - 1=Barrel, 2=USB */
+#define ASUS_WMI_DEVID_CHARGE_MODE 0x0012006C
+
+/* eGPU is connected? 1 == true */
+#define ASUS_WMI_DEVID_EGPU_CONNECTED 0x00090018
+/* egpu on/off */
#define ASUS_WMI_DEVID_EGPU 0x00090019
/* dgpu on/off */
diff --git a/include/linux/platform_data/x86/simatic-ipc-base.h b/include/linux/platform_data/x86/simatic-ipc-base.h
index 57d6a10dfc9e..2d7f7120ba6b 100644
--- a/include/linux/platform_data/x86/simatic-ipc-base.h
+++ b/include/linux/platform_data/x86/simatic-ipc-base.h
@@ -2,7 +2,7 @@
/*
* Siemens SIMATIC IPC drivers
*
- * Copyright (c) Siemens AG, 2018-2021
+ * Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
@@ -20,6 +20,9 @@
#define SIMATIC_IPC_DEVICE_127E 3
#define SIMATIC_IPC_DEVICE_227E 4
#define SIMATIC_IPC_DEVICE_227G 5
+#define SIMATIC_IPC_DEVICE_BX_21A 6
+#define SIMATIC_IPC_DEVICE_BX_39A 7
+#define SIMATIC_IPC_DEVICE_BX_59A 8
struct simatic_ipc_platform {
u8 devmode;
diff --git a/include/linux/platform_data/x86/simatic-ipc.h b/include/linux/platform_data/x86/simatic-ipc.h
index a48bb5240977..8d8b3b919674 100644
--- a/include/linux/platform_data/x86/simatic-ipc.h
+++ b/include/linux/platform_data/x86/simatic-ipc.h
@@ -2,7 +2,7 @@
/*
* Siemens SIMATIC IPC drivers
*
- * Copyright (c) Siemens AG, 2018-2021
+ * Copyright (c) Siemens AG, 2018-2023
*
* Authors:
* Henning Schild <henning.schild@siemens.com>
@@ -32,8 +32,12 @@ enum simatic_ipc_station_ids {
SIMATIC_IPC_IPC477E = 0x00000A02,
SIMATIC_IPC_IPC127E = 0x00000D01,
SIMATIC_IPC_IPC227G = 0x00000F01,
+ SIMATIC_IPC_IPC277G = 0x00000F02,
SIMATIC_IPC_IPCBX_39A = 0x00001001,
SIMATIC_IPC_IPCPX_39A = 0x00001002,
+ SIMATIC_IPC_IPCBX_21A = 0x00001101,
+ SIMATIC_IPC_IPCBX_56A = 0x00001201,
+ SIMATIC_IPC_IPCBX_59A = 0x00001202,
};
static inline u32 simatic_ipc_get_station_id(u8 *data, int max_len)
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index b845fd83f429..7a41c72c1959 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -63,6 +63,8 @@ extern struct resource *platform_get_mem_or_io(struct platform_device *,
extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
+
+#ifdef CONFIG_HAS_IOMEM
extern void __iomem *
devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
unsigned int index, struct resource **res);
@@ -72,6 +74,32 @@ devm_platform_ioremap_resource(struct platform_device *pdev,
extern void __iomem *
devm_platform_ioremap_resource_byname(struct platform_device *pdev,
const char *name);
+#else
+
+static inline void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void __iomem *
+devm_platform_ioremap_resource(struct platform_device *pdev,
+ unsigned int index)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+#endif
+
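A hedged probe() sketch using the helpers declared above (device name and register offset are illustrative); note that with !CONFIG_HAS_IOMEM the new stubs return ERR_PTR(-EINVAL), so a single error path covers both builds:

static int example_probe(struct platform_device *pdev)
{
	void __iomem *base;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	writel(0x1, base + 0x10);	/* illustrative register write */
	return 0;
}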
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
diff --git a/include/linux/pm.h b/include/linux/pm.h
index badad7d11f4f..1400c37b29c7 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -448,6 +448,15 @@ const struct dev_pm_ops __maybe_unused name = { \
SET_RUNTIME_PM_OPS(suspend_fn, resume_fn, idle_fn) \
}
+/*
+ * Use this if you want to have the suspend and resume callbacks be called
+ * with IRQs disabled.
+ */
+#define DEFINE_NOIRQ_DEV_PM_OPS(name, suspend_fn, resume_fn) \
+const struct dev_pm_ops name = { \
+ NOIRQ_SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
+}
+
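A hedged sketch of wiring the new macro into a driver; all names are illustrative:

static int example_suspend_noirq(struct device *dev)
{
	return 0;	/* quiesce hardware while IRQs are disabled */
}

static int example_resume_noirq(struct device *dev)
{
	return 0;
}

DEFINE_NOIRQ_DEV_PM_OPS(example_pm_ops, example_suspend_noirq,
			example_resume_noirq);

/* referenced from the driver as .pm = pm_sleep_ptr(&example_pm_ops) */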
#define pm_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM), (_ptr))
#define pm_sleep_ptr(_ptr) PTR_IF(IS_ENABLED(CONFIG_PM_SLEEP), (_ptr))
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 253f2676d93a..de407e7c3b55 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -159,6 +159,7 @@ int proc_pid_arch_status(struct seq_file *m, struct pid_namespace *ns,
#endif /* CONFIG_PROC_PID_ARCH_STATUS */
void arch_report_meminfo(struct seq_file *m);
+void arch_proc_pid_thread_features(struct seq_file *m, struct task_struct *task);
#else /* CONFIG_PROC_FS */
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index 04ae1d9073a7..d2f9f690a9c1 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -298,7 +298,7 @@ struct pwm_chip {
int base;
unsigned int npwm;
- struct pwm_device * (*of_xlate)(struct pwm_chip *pc,
+ struct pwm_device * (*of_xlate)(struct pwm_chip *chip,
const struct of_phandle_args *args);
unsigned int of_pwm_n_cells;
@@ -395,9 +395,9 @@ struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip,
unsigned int index,
const char *label);
-struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *pc,
+struct pwm_device *of_pwm_xlate_with_flags(struct pwm_chip *chip,
const struct of_phandle_args *args);
-struct pwm_device *of_pwm_single_xlate(struct pwm_chip *pc,
+struct pwm_device *of_pwm_single_xlate(struct pwm_chip *chip,
const struct of_phandle_args *args);
struct pwm_device *pwm_get(struct device *dev, const char *con_id);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index f29aaaf2eb21..006e18decfad 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -108,6 +108,8 @@ extern const struct raid6_calls raid6_vpermxor1;
extern const struct raid6_calls raid6_vpermxor2;
extern const struct raid6_calls raid6_vpermxor4;
extern const struct raid6_calls raid6_vpermxor8;
+extern const struct raid6_calls raid6_lsx;
+extern const struct raid6_calls raid6_lasx;
struct raid6_recov_calls {
void (*data2)(int, size_t, int, int, void **);
@@ -123,6 +125,8 @@ extern const struct raid6_recov_calls raid6_recov_avx2;
extern const struct raid6_recov_calls raid6_recov_avx512;
extern const struct raid6_recov_calls raid6_recov_s390xc;
extern const struct raid6_recov_calls raid6_recov_neon;
+extern const struct raid6_recov_calls raid6_recov_lsx;
+extern const struct raid6_recov_calls raid6_recov_lasx;
extern const struct raid6_calls raid6_neonx1;
extern const struct raid6_calls raid6_neonx2;
diff --git a/include/linux/range.h b/include/linux/range.h
index 7efb6a9b069b..6ad0b73cb7ad 100644
--- a/include/linux/range.h
+++ b/include/linux/range.h
@@ -31,12 +31,4 @@ int clean_sort_range(struct range *range, int az);
void sort_range(struct range *range, int nr_range);
-#define MAX_RESOURCE ((resource_size_t)~0)
-static inline resource_size_t cap_resource(u64 val)
-{
- if (val > MAX_RESOURCE)
- return MAX_RESOURCE;
-
- return val;
-}
#endif
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index fe8978eb69f1..b4795698d8c2 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -690,6 +690,10 @@ int rproc_detach(struct rproc *rproc);
int rproc_set_firmware(struct rproc *rproc, const char *fw_name);
void rproc_report_crash(struct rproc *rproc, enum rproc_crash_type type);
void *rproc_da_to_va(struct rproc *rproc, u64 da, size_t len, bool *is_iomem);
+
+/* from remoteproc_coredump.c */
+void rproc_coredump_cleanup(struct rproc *rproc);
+void rproc_coredump(struct rproc *rproc);
void rproc_coredump_using_sections(struct rproc *rproc);
int rproc_coredump_add_segment(struct rproc *rproc, dma_addr_t da, size_t size);
int rproc_coredump_add_custom_segment(struct rproc *rproc,
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index b87d01660412..51cc21ebb568 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -198,6 +198,8 @@ void folio_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
void page_add_file_rmap(struct page *, struct vm_area_struct *,
bool compound);
+void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
+ struct vm_area_struct *, bool compound);
void page_remove_rmap(struct page *, struct vm_area_struct *,
bool compound);
@@ -477,7 +479,6 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
#define anon_vma_init() do {} while (0)
#define anon_vma_prepare(vma) (0)
-#define anon_vma_link(vma) do {} while (0)
static inline int folio_referenced(struct folio *folio, int is_locked,
struct mem_cgroup *memcg,
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h
index 523c98b96cb4..90d8e4475f80 100644
--- a/include/linux/rpmsg.h
+++ b/include/linux/rpmsg.h
@@ -64,12 +64,14 @@ struct rpmsg_device {
};
typedef int (*rpmsg_rx_cb_t)(struct rpmsg_device *, void *, int, void *, u32);
+typedef int (*rpmsg_flowcontrol_cb_t)(struct rpmsg_device *, void *, bool);
/**
* struct rpmsg_endpoint - binds a local rpmsg address to its user
* @rpdev: rpmsg channel device
* @refcount: when this drops to zero, the ept is deallocated
* @cb: rx callback handler
+ * @flow_cb: remote flow control callback handler
* @cb_lock: must be taken before accessing/changing @cb
* @addr: local rpmsg address
* @priv: private data for the driver's use
@@ -92,6 +94,7 @@ struct rpmsg_endpoint {
struct rpmsg_device *rpdev;
struct kref refcount;
rpmsg_rx_cb_t cb;
+ rpmsg_flowcontrol_cb_t flow_cb;
struct mutex cb_lock;
u32 addr;
void *priv;
@@ -106,6 +109,7 @@ struct rpmsg_endpoint {
* @probe: invoked when a matching rpmsg channel (i.e. device) is found
* @remove: invoked when the rpmsg channel is removed
* @callback: invoked when an inbound message is received on the channel
+ * @flowcontrol: invoked when remote side flow control request is received
*/
struct rpmsg_driver {
struct device_driver drv;
@@ -113,6 +117,7 @@ struct rpmsg_driver {
int (*probe)(struct rpmsg_device *dev);
void (*remove)(struct rpmsg_device *dev);
int (*callback)(struct rpmsg_device *, void *, int, void *, u32);
+ int (*flowcontrol)(struct rpmsg_device *, void *, bool);
};
static inline u16 rpmsg16_to_cpu(struct rpmsg_device *rpdev, __rpmsg16 val)
@@ -192,6 +197,8 @@ __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp,
ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept);
+int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst);
+
#else
static inline int rpmsg_register_device_override(struct rpmsg_device *rpdev,
@@ -316,6 +323,14 @@ static inline ssize_t rpmsg_get_mtu(struct rpmsg_endpoint *ept)
return -ENXIO;
}
+static inline int rpmsg_set_flow_control(struct rpmsg_endpoint *ept, bool pause, u32 dst)
+{
+ /* This shouldn't be possible */
+ WARN_ON(1);
+
+ return -ENXIO;
+}
+
#endif /* IS_ENABLED(CONFIG_RPMSG) */
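A hedged sketch of a client driver using the new flow-control hooks; the driver name and logging are illustrative, while the signatures match struct rpmsg_driver and rpmsg_set_flow_control() above:

static int example_flowcontrol_cb(struct rpmsg_device *rpdev, void *priv,
				  bool pause)
{
	/* remote asks us to pause (true) or resume (false) sending */
	dev_info(&rpdev->dev, "remote flow control: %s\n",
		 pause ? "pause" : "resume");
	return 0;
}

static struct rpmsg_driver example_rpmsg_driver = {
	.drv.name	= "example-rpmsg",
	.flowcontrol	= example_flowcontrol_cb,
	/* .probe/.remove/.callback elided */
};

/* local side asking the remote endpoint at address dst to pause: */
static void example_pause_remote(struct rpmsg_endpoint *ept, u32 dst)
{
	rpmsg_set_flow_control(ept, true, dst);
}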
/* use a macro to avoid include chaining to get THIS_MODULE */
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 1fd9c6a21ebe..4c0bcbeb1f00 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -146,6 +146,7 @@ struct rtc_device {
time64_t range_min;
timeu64_t range_max;
+ timeu64_t alarm_offset_max;
time64_t start_secs;
time64_t offset_secs;
bool set_start_time;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 177b3f3676ef..77f01ac385f7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1671,7 +1671,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
BUILD_BUG_ON_NOT_POWER_OF_2(TASK_REPORT_MAX);
- if (tsk_state == TASK_IDLE)
+ if ((tsk_state & TASK_IDLE) == TASK_IDLE)
state = TASK_REPORT_IDLE;
/*
@@ -1679,7 +1679,7 @@ static inline unsigned int __task_state_index(unsigned int tsk_state,
* to userspace, we can make this appear as if the task has gone through
* a regular rt_mutex_lock() call.
*/
- if (tsk_state == TASK_RTLOCK_WAIT)
+ if (tsk_state & TASK_RTLOCK_WAIT)
state = TASK_UNINTERRUPTIBLE;
return fls(state);
@@ -1858,7 +1858,17 @@ extern int task_can_attach(struct task_struct *p);
extern int dl_bw_alloc(int cpu, u64 dl_bw);
extern void dl_bw_free(int cpu, u64 dl_bw);
#ifdef CONFIG_SMP
+
+/* do_set_cpus_allowed() - consider using set_cpus_allowed_ptr() instead */
extern void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask);
+
+/**
+ * set_cpus_allowed_ptr - set CPU affinity mask of a task
+ * @p: the task
+ * @new_mask: CPU affinity mask
+ *
+ * Return: zero if successful, or a negative error code
+ */
extern int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask);
extern int dup_user_cpus_ptr(struct task_struct *dst, struct task_struct *src, int node);
extern void release_user_cpus_ptr(struct task_struct *p);
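A hedged one-liner illustrating the kernel-doc above; the task and CPU choice are illustrative:

static int example_pin_task(struct task_struct *p, int cpu)
{
	/* 0 on success, negative errno otherwise */
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));
}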
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 669e8cff40c7..0014d3adaf84 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -649,12 +649,9 @@ extern void flush_itimer_signals(void);
extern bool current_is_single_threaded(void);
/*
- * Careful: do_each_thread/while_each_thread is a double loop so
- * 'break' will not work as expected - use goto instead.
+ * Without tasklist/siglock it is only rcu-safe if g can't exit/exec,
+ * otherwise next_thread(t) will never reach g after list_del_rcu(g).
*/
-#define do_each_thread(g, t) \
- for (g = t = &init_task ; (g = t = next_task(g)) != &init_task ; ) do
-
#define while_each_thread(g, t) \
while ((t = next_thread(t)) != g)
diff --git a/include/linux/secretmem.h b/include/linux/secretmem.h
index 988528b5da43..35f3a4a8ceb1 100644
--- a/include/linux/secretmem.h
+++ b/include/linux/secretmem.h
@@ -6,24 +6,23 @@
extern const struct address_space_operations secretmem_aops;
-static inline bool page_is_secretmem(struct page *page)
+static inline bool folio_is_secretmem(struct folio *folio)
{
struct address_space *mapping;
/*
- * Using page_mapping() is quite slow because of the actual call
- * instruction and repeated compound_head(page) inside the
- * page_mapping() function.
+ * Using folio_mapping() is quite slow because of the actual call
+ * instruction.
* We know that secretmem pages are not compound and LRU so we can
* save a couple of cycles here.
*/
- if (PageCompound(page) || !PageLRU(page))
+ if (folio_test_large(folio) || !folio_test_lru(folio))
return false;
mapping = (struct address_space *)
- ((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
+ ((unsigned long)folio->mapping & ~PAGE_MAPPING_FLAGS);
- if (!mapping || mapping != page->mapping)
+ if (!mapping || mapping != folio->mapping)
return false;
return mapping->a_ops == &secretmem_aops;
@@ -39,7 +38,7 @@ static inline bool vma_is_secretmem(struct vm_area_struct *vma)
return false;
}
-static inline bool page_is_secretmem(struct page *page)
+static inline bool folio_is_secretmem(struct folio *folio)
{
return false;
}
diff --git a/include/linux/security.h b/include/linux/security.h
index b2c38bfe5647..5f16eecde00b 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -145,7 +145,8 @@ extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz);
extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode);
extern int cap_ptrace_traceme(struct task_struct *parent);
-extern int cap_capget(struct task_struct *target, kernel_cap_t *effective, kernel_cap_t *inheritable, kernel_cap_t *permitted);
+extern int cap_capget(const struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted);
extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
@@ -268,10 +269,10 @@ int security_binder_transaction(const struct cred *from,
int security_binder_transfer_binder(const struct cred *from,
const struct cred *to);
int security_binder_transfer_file(const struct cred *from,
- const struct cred *to, struct file *file);
+ const struct cred *to, const struct file *file);
int security_ptrace_access_check(struct task_struct *child, unsigned int mode);
int security_ptrace_traceme(struct task_struct *parent);
-int security_capget(struct task_struct *target,
+int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted);
@@ -538,7 +539,7 @@ static inline int security_binder_transfer_binder(const struct cred *from,
static inline int security_binder_transfer_file(const struct cred *from,
const struct cred *to,
- struct file *file)
+ const struct file *file)
{
return 0;
}
@@ -554,7 +555,7 @@ static inline int security_ptrace_traceme(struct task_struct *parent)
return cap_ptrace_traceme(parent);
}
-static inline int security_capget(struct task_struct *target,
+static inline int security_capget(const struct task_struct *target,
kernel_cap_t *effective,
kernel_cap_t *inheritable,
kernel_cap_t *permitted)
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index bbae1e52ab4f..2ac50822554e 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -25,6 +25,9 @@ bool opal_unlock_from_suspend(struct opal_dev *dev);
struct opal_dev *init_opal_dev(void *data, sec_send_recv *send_recv);
int sed_ioctl(struct opal_dev *dev, unsigned int cmd, void __user *ioctl_ptr);
+#define OPAL_AUTH_KEY "opal-boot-pin"
+#define OPAL_AUTH_KEY_PREV "opal-boot-pin-prev"
+
static inline bool is_sed_ioctl(unsigned int cmd)
{
switch (cmd) {
@@ -47,6 +50,8 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_GET_STATUS:
case IOC_OPAL_GET_LR_STATUS:
case IOC_OPAL_GET_GEOMETRY:
+ case IOC_OPAL_DISCOVERY:
+ case IOC_OPAL_REVERT_LSP:
return true;
}
return false;
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index a156d2ed8d9e..bb6f073bc159 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -570,7 +570,7 @@ struct uart_port {
struct serial_port_device *port_dev; /* serial core port device */
unsigned long sysrq; /* sysrq timeout */
- unsigned int sysrq_ch; /* char for sysrq */
+ u8 sysrq_ch; /* char for sysrq */
unsigned char has_sysrq;
unsigned char sysrq_seq; /* index in sysrq_toggle_seq */
@@ -904,16 +904,16 @@ void uart_handle_dcd_change(struct uart_port *uport, bool active);
void uart_handle_cts_change(struct uart_port *uport, bool active);
void uart_insert_char(struct uart_port *port, unsigned int status,
- unsigned int overrun, unsigned int ch, unsigned int flag);
+ unsigned int overrun, u8 ch, u8 flag);
void uart_xchar_out(struct uart_port *uport, int offset);
#ifdef CONFIG_MAGIC_SYSRQ_SERIAL
#define SYSRQ_TIMEOUT (HZ * 5)
-bool uart_try_toggle_sysrq(struct uart_port *port, unsigned int ch);
+bool uart_try_toggle_sysrq(struct uart_port *port, u8 ch);
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -932,7 +932,7 @@ static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
if (!port->sysrq)
return 0;
@@ -953,7 +953,7 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int c
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
spin_unlock(&port->lock);
@@ -972,7 +972,7 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- int sysrq_ch;
+ u8 sysrq_ch;
if (!port->has_sysrq) {
spin_unlock_irqrestore(&port->lock, flags);
@@ -988,11 +988,11 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
handle_sysrq(sysrq_ch);
}
#else /* CONFIG_MAGIC_SYSRQ_SERIAL */
-static inline int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_handle_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
-static inline int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
+static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
{
return 0;
}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 848c7c82ad5a..8228d1276a2f 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>
#include <linux/cleanup.h>
+#include <linux/hash.h>
/*
@@ -345,6 +346,12 @@ static inline unsigned int arch_slab_minalign(void)
#define SLAB_OBJ_MIN_SIZE (KMALLOC_MIN_SIZE < 16 ? \
(KMALLOC_MIN_SIZE) : 16)
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+#define RANDOM_KMALLOC_CACHES_NR 15 // # of cache copies
+#else
+#define RANDOM_KMALLOC_CACHES_NR 0
+#endif
+
/*
* Whenever changing this, take care of that kmalloc_type() and
* create_kmalloc_caches() still work as intended.
@@ -361,6 +368,8 @@ enum kmalloc_cache_type {
#ifndef CONFIG_MEMCG_KMEM
KMALLOC_CGROUP = KMALLOC_NORMAL,
#endif
+ KMALLOC_RANDOM_START = KMALLOC_NORMAL,
+ KMALLOC_RANDOM_END = KMALLOC_RANDOM_START + RANDOM_KMALLOC_CACHES_NR,
#ifdef CONFIG_SLUB_TINY
KMALLOC_RECLAIM = KMALLOC_NORMAL,
#else
@@ -386,14 +395,22 @@ kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];
(IS_ENABLED(CONFIG_ZONE_DMA) ? __GFP_DMA : 0) | \
(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))
-static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
+extern unsigned long random_kmalloc_seed;
+
+static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags, unsigned long caller)
{
/*
* The most common case is KMALLOC_NORMAL, so test for it
* with a single branch for all the relevant flags.
*/
if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
+#ifdef CONFIG_RANDOM_KMALLOC_CACHES
+ /* RANDOM_KMALLOC_CACHES_NR (=15) copies + the KMALLOC_NORMAL */
+ return KMALLOC_RANDOM_START + hash_64(caller ^ random_kmalloc_seed,
+ ilog2(RANDOM_KMALLOC_CACHES_NR + 1));
+#else
return KMALLOC_NORMAL;
+#endif
/*
* At least one of the flags has to be set. Their priorities in
@@ -580,7 +597,7 @@ static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
index = kmalloc_index(size);
return kmalloc_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, size);
}
return __kmalloc(size, flags);
@@ -596,7 +613,7 @@ static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t fla
index = kmalloc_index(size);
return kmalloc_node_trace(
- kmalloc_caches[kmalloc_type(flags)][index],
+ kmalloc_caches[kmalloc_type(flags, _RET_IP_)][index],
flags, node, size);
}
return __kmalloc_node(size, flags, node);
diff --git a/include/linux/soc/qcom/qcom_aoss.h b/include/linux/soc/qcom/qcom_aoss.h
index 3c2a82e606f8..7361ca028752 100644
--- a/include/linux/soc/qcom/qcom_aoss.h
+++ b/include/linux/soc/qcom/qcom_aoss.h
@@ -13,13 +13,13 @@ struct qmp;
#if IS_ENABLED(CONFIG_QCOM_AOSS_QMP)
-int qmp_send(struct qmp *qmp, const void *data, size_t len);
+int qmp_send(struct qmp *qmp, const char *fmt, ...);
struct qmp *qmp_get(struct device *dev);
void qmp_put(struct qmp *qmp);
#else
-static inline int qmp_send(struct qmp *qmp, const void *data, size_t len)
+static inline int qmp_send(struct qmp *qmp, const char *fmt, ...)
{
return -ENODEV;
}
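A hedged sketch of the printf-style qmp_send() after this change; the message payload is a placeholder, not a real AOSS request:

static int example_qmp_notify(struct qmp *qmp, int value)
{
	return qmp_send(qmp, "{class: example, val: %d}", value);
}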
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 2990f425fdef..8190878645f9 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -2,10 +2,13 @@
#ifndef __QCOM_SMD_RPM_H__
#define __QCOM_SMD_RPM_H__
+#include <linux/types.h>
+
struct qcom_smd_rpm;
-#define QCOM_SMD_RPM_ACTIVE_STATE 0
-#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_ACTIVE_STATE 0
+#define QCOM_SMD_RPM_SLEEP_STATE 1
+#define QCOM_SMD_RPM_STATE_NUM 2
/*
* Constants used for addressing resources in the RPM.
@@ -44,6 +47,19 @@ struct qcom_smd_rpm;
#define QCOM_SMD_RPM_PKA_CLK 0x616b70
#define QCOM_SMD_RPM_MCFG_CLK 0x6766636d
+#define QCOM_RPM_KEY_SOFTWARE_ENABLE 0x6e657773
+#define QCOM_RPM_KEY_PIN_CTRL_CLK_BUFFER_ENABLE_KEY 0x62636370
+#define QCOM_RPM_SMD_KEY_RATE 0x007a484b
+#define QCOM_RPM_SMD_KEY_ENABLE 0x62616e45
+#define QCOM_RPM_SMD_KEY_STATE 0x54415453
+#define QCOM_RPM_SCALING_ENABLE_ID 0x2
+
+struct clk_smd_rpm_req {
+ __le32 key;
+ __le32 nbytes;
+ __le32 value;
+};
+
int qcom_rpm_smd_write(struct qcom_smd_rpm *rpm,
int state,
u32 resource_type, u32 resource_id,
diff --git a/include/linux/soc/qcom/smem.h b/include/linux/soc/qcom/smem.h
index 223db6a9c733..a36a3b9d4929 100644
--- a/include/linux/soc/qcom/smem.h
+++ b/include/linux/soc/qcom/smem.h
@@ -4,6 +4,7 @@
#define QCOM_SMEM_HOST_ANY -1
+bool qcom_smem_is_available(void);
int qcom_smem_alloc(unsigned host, unsigned item, size_t size);
void *qcom_smem_get(unsigned host, unsigned item, size_t *size);
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index 8923387a7405..4f3d14bb1538 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -486,6 +486,11 @@ struct sdw_slave_id {
__u8 sdw_version:4;
};
+struct sdw_extended_slave_id {
+ int link_id;
+ struct sdw_slave_id id;
+};
+
/*
* Helper macros to extract the MIPI-defined IDs
*
@@ -853,6 +858,8 @@ struct sdw_defer {
* @post_bank_switch: Callback for post bank switch
* @read_ping_status: Read status from PING frames, reported with two bits per Device.
* Bits 31:24 are reserved.
+ * @get_device_num: Callback for vendor-specific device_number allocation
+ * @put_device_num: Callback for vendor-specific device_number release
* @new_peripheral_assigned: Callback to handle enumeration of new peripheral.
*/
struct sdw_master_ops {
@@ -868,7 +875,11 @@ struct sdw_master_ops {
int (*pre_bank_switch)(struct sdw_bus *bus);
int (*post_bank_switch)(struct sdw_bus *bus);
u32 (*read_ping_status)(struct sdw_bus *bus);
- void (*new_peripheral_assigned)(struct sdw_bus *bus, int dev_num);
+ int (*get_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*put_device_num)(struct sdw_bus *bus, struct sdw_slave *slave);
+ void (*new_peripheral_assigned)(struct sdw_bus *bus,
+ struct sdw_slave *slave,
+ int dev_num);
};
/**
@@ -903,9 +914,6 @@ struct sdw_master_ops {
* meaningful if multi_link is set. If set to 1, hardware-based
* synchronization will be used even if a stream only uses a single
* SoundWire segment.
- * @dev_num_ida_min: if set, defines the minimum values for the IDA
- * used to allocate system-unique device numbers. This value needs to be
- * identical across all SoundWire bus in the system.
*/
struct sdw_bus {
struct device *dev;
@@ -934,7 +942,6 @@ struct sdw_bus {
u32 bank_switch_timeout;
bool multi_link;
int hw_sync_min_links;
- int dev_num_ida_min;
};
int sdw_bus_master_add(struct sdw_bus *bus, struct device *parent,
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index 11fc88fb0d78..00bb22d96ae5 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -264,11 +264,6 @@ struct sdw_intel_link_dev;
*/
#define SDW_INTEL_CLK_STOP_BUS_RESET BIT(3)
-struct sdw_intel_slave_id {
- int link_id;
- struct sdw_slave_id id;
-};
-
struct hdac_bus;
/**
@@ -298,7 +293,7 @@ struct sdw_intel_ctx {
int num_slaves;
acpi_handle handle;
struct sdw_intel_link_dev **ldev;
- struct sdw_intel_slave_id *ids;
+ struct sdw_extended_slave_id *ids;
struct list_head link_list;
struct mutex shim_lock; /* lock for access to shared SHIM registers */
u32 shim_mask;
@@ -433,4 +428,11 @@ struct sdw_intel_hw_ops {
extern const struct sdw_intel_hw_ops sdw_intel_cnl_hw_ops;
extern const struct sdw_intel_hw_ops sdw_intel_lnl_hw_ops;
+/*
+ * IDA min selected to allow for 5 unconstrained devices per link,
+ * and 6 system-unique Device Numbers for wake-capable devices.
+ */
+
+#define SDW_INTEL_DEV_NUM_IDA_MIN 6
+
#endif
diff --git a/include/linux/sprintf.h b/include/linux/sprintf.h
new file mode 100644
index 000000000000..33dcbec71925
--- /dev/null
+++ b/include/linux/sprintf.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KERNEL_SPRINTF_H_
+#define _LINUX_KERNEL_SPRINTF_H_
+
+#include <linux/compiler_attributes.h>
+#include <linux/types.h>
+
+int num_to_str(char *buf, int size, unsigned long long num, unsigned int width);
+
+__printf(2, 3) int sprintf(char *buf, const char * fmt, ...);
+__printf(2, 0) int vsprintf(char *buf, const char *, va_list);
+__printf(3, 4) int snprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vsnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(3, 4) int scnprintf(char *buf, size_t size, const char *fmt, ...);
+__printf(3, 0) int vscnprintf(char *buf, size_t size, const char *fmt, va_list args);
+__printf(2, 3) __malloc char *kasprintf(gfp_t gfp, const char *fmt, ...);
+__printf(2, 0) __malloc char *kvasprintf(gfp_t gfp, const char *fmt, va_list args);
+__printf(2, 0) const char *kvasprintf_const(gfp_t gfp, const char *fmt, va_list args);
+
+__scanf(2, 3) int sscanf(const char *, const char *, ...);
+__scanf(2, 0) int vsscanf(const char *, const char *, va_list);
+
+/* These are for specific cases, do not use without real need */
+extern bool no_hash_pointers;
+int no_hash_pointers_enable(char *str);
+
+#endif /* _LINUX_KERNEL_SPRINTF_H_ */
diff --git a/include/linux/string_choices.h b/include/linux/string_choices.h
index 48120222b9b2..3c1091941eb8 100644
--- a/include/linux/string_choices.h
+++ b/include/linux/string_choices.h
@@ -30,6 +30,7 @@ static inline const char *str_read_write(bool v)
{
return v ? "read" : "write";
}
+#define str_write_read(v) str_read_write(!(v))
static inline const char *str_on_off(bool v)
{
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 789ab30045da..9d1f5bb74dd5 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -109,6 +109,8 @@ char *kstrdup_quotable(const char *src, gfp_t gfp);
char *kstrdup_quotable_cmdline(struct task_struct *task, gfp_t gfp);
char *kstrdup_quotable_file(struct file *file, gfp_t gfp);
+char *kstrdup_and_replace(const char *src, char old, char new, gfp_t gfp);
+
char **kasprintf_strarray(gfp_t gfp, const char *prefix, size_t n);
void kfree_strarray(char **array, size_t n);
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index 518bd28f5ab8..35766963dd14 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -56,10 +56,14 @@ struct cache_head {
struct kref ref;
unsigned long flags;
};
-#define CACHE_VALID 0 /* Entry contains valid data */
-#define CACHE_NEGATIVE 1 /* Negative entry - there is no match for the key */
-#define CACHE_PENDING 2 /* An upcall has been sent but no reply received yet*/
-#define CACHE_CLEANED 3 /* Entry has been cleaned from cache */
+
+/* cache_head.flags */
+enum {
+ CACHE_VALID, /* Entry contains valid data */
+ CACHE_NEGATIVE, /* Negative entry - there is no match for the key */
+ CACHE_PENDING, /* An upcall has been sent but no reply received yet*/
+ CACHE_CLEANED, /* Entry has been cleaned from cache */
+};
#define CACHE_NEW_EXPIRY 120 /* keep new things pending confirmation for 120 seconds */
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 4f41d839face..af7358277f1c 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -148,6 +148,8 @@ struct rpc_create_args {
const struct cred *cred;
unsigned int max_connect;
struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct rpc_add_xprt_test {
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index d94d4f410507..3ce1550d1beb 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -43,22 +43,6 @@ struct net;
#ifdef CONFIG_PROC_FS
int rpc_proc_init(struct net *);
void rpc_proc_exit(struct net *);
-#else
-static inline int rpc_proc_init(struct net *net)
-{
- return 0;
-}
-
-static inline void rpc_proc_exit(struct net *net)
-{
-}
-#endif
-
-#ifdef MODULE
-void rpc_modcount(struct inode *, int);
-#endif
-
-#ifdef CONFIG_PROC_FS
struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
void rpc_proc_unregister(struct net *,const char *);
void rpc_proc_zero(const struct rpc_program *);
@@ -69,7 +53,14 @@ void svc_proc_unregister(struct net *, const char *);
void svc_seq_show(struct seq_file *,
const struct svc_stat *);
#else
+static inline int rpc_proc_init(struct net *net)
+{
+ return 0;
+}
+static inline void rpc_proc_exit(struct net *net)
+{
+}
static inline struct proc_dir_entry *rpc_proc_register(struct net *net, struct rpc_stat *s) { return NULL; }
static inline void rpc_proc_unregister(struct net *net, const char *p) {}
static inline void rpc_proc_zero(const struct rpc_program *p) {}
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index f8751118c122..dbf5b21feafe 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -39,16 +39,20 @@ struct svc_pool {
struct list_head sp_all_threads; /* all server threads */
/* statistics on pool operation */
+ struct percpu_counter sp_messages_arrived;
struct percpu_counter sp_sockets_queued;
struct percpu_counter sp_threads_woken;
- struct percpu_counter sp_threads_timedout;
-#define SP_TASK_PENDING (0) /* still work to do even if no
- * xprt is queued. */
-#define SP_CONGESTED (1)
unsigned long sp_flags;
} ____cacheline_aligned_in_smp;
+/* bits for sp_flags */
+enum {
+ SP_TASK_PENDING, /* still work to do even if no xprt is queued */
+ SP_CONGESTED, /* all threads are busy, none idle */
+};
+
+
/*
* RPC service.
*
@@ -120,19 +124,6 @@ static inline void svc_put(struct svc_serv *serv)
kref_put(&serv->sv_refcnt, svc_destroy);
}
-/**
- * svc_put_not_last - decrement non-final reference count on SUNRPC serv
- * @serv: the svc_serv to have count decremented
- *
- * Returns: %true is refcount was decremented.
- *
- * If the refcount is 1, it is not decremented and instead failure is reported.
- */
-static inline bool svc_put_not_last(struct svc_serv *serv)
-{
- return refcount_dec_not_one(&serv->sv_refcnt.refcount);
-}
-
/*
* Maximum payload size supported by a kernel RPC server.
* This is use to determine the max number of pages nfsd is
@@ -232,16 +223,6 @@ struct svc_rqst {
u32 rq_proc; /* procedure number */
u32 rq_prot; /* IP protocol */
int rq_cachetype; /* catering to nfsd */
-#define RQ_SECURE (0) /* secure port */
-#define RQ_LOCAL (1) /* local request */
-#define RQ_USEDEFERRAL (2) /* use deferral */
-#define RQ_DROPME (3) /* drop current reply */
-#define RQ_SPLICE_OK (4) /* turned off in gss privacy
- * to prevent encrypting page
- * cache pages */
-#define RQ_VICTIM (5) /* about to be shut down */
-#define RQ_BUSY (6) /* request is busy */
-#define RQ_DATA (7) /* request has data */
unsigned long rq_flags; /* flags field */
ktime_t rq_qtime; /* enqueue time */
@@ -265,7 +246,6 @@ struct svc_rqst {
/* Catering to nfsd */
struct auth_domain * rq_client; /* RPC peer info */
struct auth_domain * rq_gssclient; /* "gss/"-style peer info */
- struct svc_cacherep * rq_cacherep; /* cache info */
struct task_struct *rq_task; /* service thread */
struct net *rq_bc_net; /* pointer to backchannel's
* net namespace
@@ -273,6 +253,19 @@ struct svc_rqst {
void ** rq_lease_breaker; /* The v4 client breaking a lease */
};
+/* bits for rq_flags */
+enum {
+ RQ_SECURE, /* secure port */
+ RQ_LOCAL, /* local request */
+ RQ_USEDEFERRAL, /* use deferral */
+ RQ_DROPME, /* drop current reply */
+ RQ_SPLICE_OK, /* turned off in gss privacy to prevent
+ * encrypting page cache pages */
+ RQ_VICTIM, /* about to be shut down */
+ RQ_BUSY, /* request is busy */
+ RQ_DATA, /* request has data */
+};
+
#define SVC_NET(rqst) (rqst->rq_xprt ? rqst->rq_xprt->xpt_net : rqst->rq_bc_net)
/*
@@ -344,7 +337,7 @@ struct svc_program {
char * pg_name; /* service name */
char * pg_class; /* class name: services sharing authentication */
struct svc_stat * pg_stats; /* rpc statistics */
- int (*pg_authenticate)(struct svc_rqst *);
+ enum svc_auth_status (*pg_authenticate)(struct svc_rqst *rqstp);
__be32 (*pg_init_request)(struct svc_rqst *,
const struct svc_program *,
struct svc_process_info *);
@@ -427,6 +420,7 @@ int svc_register(const struct svc_serv *, struct net *, const int,
void svc_wake_up(struct svc_serv *);
void svc_reserve(struct svc_rqst *rqstp, int space);
+void svc_pool_wake_idle_thread(struct svc_pool *pool);
struct svc_pool *svc_pool_for_cpu(struct svc_serv *serv);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
const char * svc_proc_name(const struct svc_rqst *rqstp);
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index a6b12631db21..fa55d12dc765 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -56,23 +56,6 @@ struct svc_xprt {
struct list_head xpt_list;
struct list_head xpt_ready;
unsigned long xpt_flags;
-#define XPT_BUSY 0 /* enqueued/receiving */
-#define XPT_CONN 1 /* conn pending */
-#define XPT_CLOSE 2 /* dead or dying */
-#define XPT_DATA 3 /* data pending */
-#define XPT_TEMP 4 /* connected transport */
-#define XPT_DEAD 6 /* transport closed */
-#define XPT_CHNGBUF 7 /* need to change snd/rcv buf sizes */
-#define XPT_DEFERRED 8 /* deferred request pending */
-#define XPT_OLD 9 /* used for xprt aging mark+sweep */
-#define XPT_LISTENER 10 /* listening endpoint */
-#define XPT_CACHE_AUTH 11 /* cache auth info */
-#define XPT_LOCAL 12 /* connection from loopback interface */
-#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
-#define XPT_CONG_CTRL 14 /* has congestion control */
-#define XPT_HANDSHAKE 15 /* xprt requests a handshake */
-#define XPT_TLS_SESSION 16 /* transport-layer security established */
-#define XPT_PEER_AUTH 17 /* peer has been authenticated */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
@@ -97,6 +80,27 @@ struct svc_xprt {
struct rpc_xprt_switch *xpt_bc_xps; /* NFSv4.1 backchannel */
};
+/* flag bits for xpt_flags */
+enum {
+ XPT_BUSY, /* enqueued/receiving */
+ XPT_CONN, /* conn pending */
+ XPT_CLOSE, /* dead or dying */
+ XPT_DATA, /* data pending */
+ XPT_TEMP, /* connected transport */
+ XPT_DEAD, /* transport closed */
+ XPT_CHNGBUF, /* need to change snd/rcv buf sizes */
+ XPT_DEFERRED, /* deferred request pending */
+ XPT_OLD, /* used for xprt aging mark+sweep */
+ XPT_LISTENER, /* listening endpoint */
+ XPT_CACHE_AUTH, /* cache auth info */
+ XPT_LOCAL, /* connection from loopback interface */
+ XPT_KILL_TEMP, /* call xpo_kill_temp_xprt before closing */
+ XPT_CONG_CTRL, /* has congestion control */
+ XPT_HANDSHAKE, /* xprt requests a handshake */
+ XPT_TLS_SESSION, /* transport-layer security established */
+ XPT_PEER_AUTH, /* peer has been authenticated */
+};
+
static inline void unregister_xpt_user(struct svc_xprt *xpt, struct svc_xpt_user *u)
{
spin_lock(&xpt->xpt_lock);
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 6d9cc9080aca..6f90203edbf8 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -83,6 +83,19 @@ struct auth_domain {
struct rcu_head rcu_head;
};
+enum svc_auth_status {
+ SVC_GARBAGE = 1,
+ SVC_SYSERR,
+ SVC_VALID,
+ SVC_NEGATIVE,
+ SVC_OK,
+ SVC_DROP,
+ SVC_CLOSE,
+ SVC_DENIED,
+ SVC_PENDING,
+ SVC_COMPLETE,
+};
+
/*
* Each authentication flavour registers an auth_ops
* structure.
@@ -98,6 +111,8 @@ struct auth_domain {
* is (probably) already in place. Certainly space is
* reserved for it.
* DROP - simply drop the request. It may have been deferred
+ * CLOSE - like SVC_DROP, but request is definitely lost.
+ * If there is a tcp connection, it should be closed.
* GARBAGE - rpc garbage_args error
* SYSERR - rpc system_err error
* DENIED - authp holds reason for denial.
@@ -111,14 +126,10 @@ struct auth_domain {
*
* release() is given a request after the procedure has been run.
* It should sign/encrypt the results if needed
- * It should return:
- * OK - the resbuf is ready to be sent
- * DROP - the reply should be quitely dropped
- * DENIED - authp holds a reason for MSG_DENIED
- * SYSERR - rpc system_err
*
* domain_release()
* This call releases a domain.
+ *
* set_client()
 * Given a pending request (struct svc_rqst), finds and assigns
* an appropriate 'auth_domain' as the client.
@@ -127,44 +138,28 @@ struct auth_ops {
char * name;
struct module *owner;
int flavour;
- int (*accept)(struct svc_rqst *rq);
- int (*release)(struct svc_rqst *rq);
- void (*domain_release)(struct auth_domain *);
- int (*set_client)(struct svc_rqst *rq);
-};
-#define SVC_GARBAGE 1
-#define SVC_SYSERR 2
-#define SVC_VALID 3
-#define SVC_NEGATIVE 4
-#define SVC_OK 5
-#define SVC_DROP 6
-#define SVC_CLOSE 7 /* Like SVC_DROP, but request is definitely
- * lost so if there is a tcp connection, it
- * should be closed
- */
-#define SVC_DENIED 8
-#define SVC_PENDING 9
-#define SVC_COMPLETE 10
+ enum svc_auth_status (*accept)(struct svc_rqst *rqstp);
+ int (*release)(struct svc_rqst *rqstp);
+ void (*domain_release)(struct auth_domain *dom);
+ enum svc_auth_status (*set_client)(struct svc_rqst *rqstp);
+};
struct svc_xprt;
-extern int svc_authenticate(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_authenticate(struct svc_rqst *rqstp);
extern int svc_authorise(struct svc_rqst *rqstp);
-extern int svc_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svc_set_client(struct svc_rqst *rqstp);
extern int svc_auth_register(rpc_authflavor_t flavor, struct auth_ops *aops);
extern void svc_auth_unregister(rpc_authflavor_t flavor);
extern struct auth_domain *unix_domain_find(char *name);
extern void auth_domain_put(struct auth_domain *item);
-extern int auth_unix_add_addr(struct net *net, struct in6_addr *addr, struct auth_domain *dom);
extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *new);
extern struct auth_domain *auth_domain_find(char *name);
-extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
-extern int auth_unix_forget_old(struct auth_domain *dom);
extern void svcauth_unix_purge(struct net *net);
extern void svcauth_unix_info_release(struct svc_xprt *xpt);
-extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
+extern enum svc_auth_status svcauth_unix_set_client(struct svc_rqst *rqstp);
extern int unix_gid_cache_create(struct net *net);
extern void unix_gid_cache_destroy(struct net *net);
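Returning enum svc_auth_status instead of a bare int lets the compiler check that ->accept() and ->set_client() implementations stay within the defined status codes. A minimal sketch of a method under the new signature (the logic shown is illustrative only):

        static enum svc_auth_status example_set_client(struct svc_rqst *rqstp)
        {
                if (!rqstp->rq_client)
                        return SVC_DENIED;
                return SVC_OK;
        }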
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index a7116048a4d4..7c78ec6356b9 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -35,8 +35,8 @@ struct svc_sock {
/* Total length of the data (not including fragment headers)
* received so far in the fragments making up this rpc: */
u32 sk_datalen;
- /* Number of queued send requests */
- atomic_t sk_sendqlen;
+
+ struct page_frag_cache sk_frag_cache;
struct completion sk_handshake_done;
@@ -56,8 +56,7 @@ static inline u32 svc_sock_final_rec(struct svc_sock *svsk)
/*
* Function prototypes.
*/
-void svc_close_net(struct svc_serv *, struct net *);
-int svc_recv(struct svc_rqst *, long);
+void svc_recv(struct svc_rqst *rqstp);
void svc_send(struct svc_rqst *rqstp);
void svc_drop(struct svc_rqst *);
void svc_sock_update_bufs(struct svc_serv *serv);
@@ -66,8 +65,6 @@ int svc_addsock(struct svc_serv *serv, struct net *net,
const struct cred *cred);
void svc_init_xprt_sock(void);
void svc_cleanup_xprt_sock(void);
-struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot);
-void svc_sock_destroy(struct svc_xprt *);
/*
* svc_makesock socket characteristics
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index f89ec4b5ea16..2f8dc47f1eb0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -139,6 +139,8 @@ void xdr_terminate_string(const struct xdr_buf *, const u32);
size_t xdr_buf_pagecount(const struct xdr_buf *buf);
int xdr_alloc_bvec(struct xdr_buf *buf, gfp_t gfp);
void xdr_free_bvec(struct xdr_buf *buf);
+unsigned int xdr_buf_to_bvec(struct bio_vec *bvec, unsigned int bvec_size,
+ const struct xdr_buf *xdr);
static inline __be32 *xdr_encode_array(__be32 *p, const void *s, unsigned int len)
{
@@ -224,6 +226,7 @@ struct xdr_stream {
struct kvec *iov; /* pointer to the current kvec */
struct kvec scratch; /* Scratch buffer */
struct page **page_ptr; /* pointer to the current page */
+ void *page_kaddr; /* kmapped address of the current page */
unsigned int nwords; /* Remaining decode buffer length */
struct rpc_rqst *rqst; /* For debugging */
@@ -255,6 +258,7 @@ extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf,
__be32 *p, struct rpc_rqst *rqst);
extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
struct page **pages, unsigned int len);
+extern void xdr_finish_decode(struct xdr_stream *xdr);
extern __be32 *xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes);
extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
@@ -775,7 +779,7 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
- if (len > SIZE_MAX / sizeof(*p))
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
return -EBADMSG;
p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
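The reworked bound check only costs anything where overflow is actually possible: len * sizeof(*p) can exceed SIZE_MAX only if SIZE_MAX / sizeof(u32) < U32_MAX. On 64-bit builds SIZE_MAX / 4 is about 2^62, far above U32_MAX, so the leading constant comparison is false and the compiler drops the test entirely; on 32-bit builds SIZE_MAX / 4 is about 2^30, below U32_MAX, so the length check is retained.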
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index b52411bcfe4e..4ecc89301eb7 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -351,6 +351,8 @@ struct xprt_create {
struct rpc_xprt_switch *bc_xps;
unsigned int flags;
struct xprtsec_parms xprtsec;
+ unsigned long connect_timeout;
+ unsigned long reconnect_timeout;
};
struct xprt_class {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 456546443f1f..493487ed7c38 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -302,10 +302,6 @@ struct swap_info_struct {
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
struct completion comp; /* seldom referenced */
-#ifdef CONFIG_FRONTSWAP
- unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
- atomic_t frontswap_pages; /* frontswap pages in-use counter */
-#endif
spinlock_t lock; /*
* protect map scan related fields like
* swap_map, lowest_bit, highest_bit,
@@ -337,15 +333,13 @@ struct swap_info_struct {
*/
};
-static inline swp_entry_t folio_swap_entry(struct folio *folio)
+static inline swp_entry_t page_swap_entry(struct page *page)
{
- swp_entry_t entry = { .val = page_private(&folio->page) };
- return entry;
-}
+ struct folio *folio = page_folio(page);
+ swp_entry_t entry = folio->swap;
-static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
-{
- folio->private = (void *)entry.val;
+ entry.val += folio_page_idx(folio, page);
+ return entry;
}
/* linux/mm/workingset.c */
@@ -630,11 +624,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
-#ifdef CONFIG_ZSWAP
-extern u64 zswap_pool_total_size;
-extern atomic_t zswap_stored_pages;
-#endif
-
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp);
static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 7ed529a77c5b..99e3ed469e88 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -2,11 +2,6 @@
#ifndef _LINUX_SWAPFILE_H
#define _LINUX_SWAPFILE_H
-/*
- * these were static in swapfile.c but frontswap.c needs them and we don't
- * want to expose them to the dozens of source files that include swap.h
- */
-extern struct swap_info_struct *swap_info[];
extern unsigned long generic_max_swapfile_size(void);
unsigned long arch_max_swapfile_size(void);
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 4c932cb45e0b..bff1e8d97de0 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -393,7 +393,12 @@ static inline bool is_migration_entry_dirty(swp_entry_t entry)
typedef unsigned long pte_marker;
#define PTE_MARKER_UFFD_WP BIT(0)
-#define PTE_MARKER_SWAPIN_ERROR BIT(1)
+/*
+ * "Poisoned" here is meant in the very general sense of "future accesses are
+ * invalid", instead of referring very specifically to hardware memory errors.
+ * This marker is meant to represent any of various different causes of this.
+ */
+#define PTE_MARKER_POISONED BIT(1)
#define PTE_MARKER_MASK (BIT(2) - 1)
static inline swp_entry_t make_pte_marker_entry(pte_marker marker)
@@ -421,15 +426,15 @@ static inline pte_t make_pte_marker(pte_marker marker)
return swp_entry_to_pte(make_pte_marker_entry(marker));
}
-static inline swp_entry_t make_swapin_error_entry(void)
+static inline swp_entry_t make_poisoned_swp_entry(void)
{
- return make_pte_marker_entry(PTE_MARKER_SWAPIN_ERROR);
+ return make_pte_marker_entry(PTE_MARKER_POISONED);
}
-static inline int is_swapin_error_entry(swp_entry_t entry)
+static inline int is_poisoned_swp_entry(swp_entry_t entry)
{
return is_pte_marker_entry(entry) &&
- (pte_marker_get(entry) & PTE_MARKER_SWAPIN_ERROR);
+ (pte_marker_get(entry) & PTE_MARKER_POISONED);
}
/*
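A hedged sketch of how the renamed helpers fit together: a marker PTE is installed where the range is poisoned, and recognized again in the fault path (locals and the surrounding locking are illustrative, not from this patch):

        swp_entry_t entry = make_poisoned_swp_entry();
        pte_t marker = swp_entry_to_pte(entry);        /* installed in place of a present PTE */

        /* later, when a fault walks into that PTE: */
        if (is_poisoned_swp_entry(pte_to_swp_entry(orig_pte)))
                return VM_FAULT_SIGBUS;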
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index 4e52cd5e0bdc..b4536626f8ff 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/limits.h>
#include <linux/spinlock.h>
+#include <linux/workqueue.h>
struct device;
struct page;
@@ -62,8 +63,7 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
#ifdef CONFIG_SWIOTLB
/**
- * struct io_tlb_mem - IO TLB Memory Pool Descriptor
- *
+ * struct io_tlb_pool - IO TLB memory pool descriptor
* @start: The start address of the swiotlb memory pool. Used to do a quick
* range check to see if the memory was in fact allocated by this
* API.
@@ -73,19 +73,48 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* @vaddr: The vaddr of the swiotlb memory pool. The swiotlb memory pool
* may be remapped in the memory encrypted case and store virtual
* address for bounce buffer operation.
- * @nslabs: The number of IO TLB blocks (in groups of 64) between @start and
- * @end. For default swiotlb, this is command line adjustable via
- * setup_io_tlb_npages.
- * @list: The free list describing the number of free entries available
- * from each index.
- * @orig_addr: The original address corresponding to a mapped entry.
- * @alloc_size: Size of the allocated buffer.
+ * @nslabs: The number of IO TLB slots between @start and @end. For the
+ * default swiotlb, this can be adjusted with a boot parameter,
+ * see setup_io_tlb_npages().
+ * @late_alloc: %true if allocated using the page allocator.
+ * @nareas: Number of areas in the pool.
+ * @area_nslabs: Number of slots in each area.
+ * @areas: Array of memory area descriptors.
+ * @slots: Array of slot descriptors.
+ * @node: Member of the IO TLB memory pool list.
+ * @rcu: RCU head for swiotlb_dyn_free().
+ * @transient: %true if transient memory pool.
+ */
+struct io_tlb_pool {
+ phys_addr_t start;
+ phys_addr_t end;
+ void *vaddr;
+ unsigned long nslabs;
+ bool late_alloc;
+ unsigned int nareas;
+ unsigned int area_nslabs;
+ struct io_tlb_area *areas;
+ struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ struct list_head node;
+ struct rcu_head rcu;
+ bool transient;
+#endif
+};
+
+/**
+ * struct io_tlb_mem - Software IO TLB allocator
+ * @defpool: Default (initial) IO TLB memory pool descriptor.
+ * @pool: IO TLB memory pool descriptor (if not dynamic).
+ * @nslabs: Total number of IO TLB slabs in all pools.
* @debugfs: The dentry to debugfs.
- * @late_alloc: %true if allocated using the page allocator
* @force_bounce: %true if swiotlb bouncing is forced
* @for_alloc: %true if the pool is used for memory allocation
- * @nareas: The area number in the pool.
- * @area_nslabs: The slot number in the area.
+ * @can_grow: %true if more pools can be allocated dynamically.
+ * @phys_limit: Maximum allowed physical address.
+ * @lock: Lock to synchronize changes to the list.
+ * @pools: List of IO TLB memory pool descriptors (if dynamic).
+ * @dyn_alloc: Dynamic IO TLB pool allocation work.
* @total_used: The total number of slots in the pool that are currently used
* across all areas. Used only for calculating used_hiwater in
* debugfs.
@@ -93,30 +122,64 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
* in debugfs.
*/
struct io_tlb_mem {
- phys_addr_t start;
- phys_addr_t end;
- void *vaddr;
+ struct io_tlb_pool defpool;
unsigned long nslabs;
struct dentry *debugfs;
- bool late_alloc;
bool force_bounce;
bool for_alloc;
- unsigned int nareas;
- unsigned int area_nslabs;
- struct io_tlb_area *areas;
- struct io_tlb_slot *slots;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ bool can_grow;
+ u64 phys_limit;
+ spinlock_t lock;
+ struct list_head pools;
+ struct work_struct dyn_alloc;
+#endif
#ifdef CONFIG_DEBUG_FS
atomic_long_t total_used;
atomic_long_t used_hiwater;
#endif
};
-extern struct io_tlb_mem io_tlb_default_mem;
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+
+struct io_tlb_pool *swiotlb_find_pool(struct device *dev, phys_addr_t paddr);
+
+#else
+
+static inline struct io_tlb_pool *swiotlb_find_pool(struct device *dev,
+ phys_addr_t paddr)
+{
+ return &dev->dma_io_tlb_mem->defpool;
+}
+
+#endif
+
+/**
+ * is_swiotlb_buffer() - check if a physical address belongs to a swiotlb
+ * @dev: Device which has mapped the buffer.
+ * @paddr: Physical address within the DMA buffer.
+ *
+ * Check if @paddr points into a bounce buffer.
+ *
+ * Return:
+ * * %true if @paddr points into a bounce buffer
+ * * %false otherwise
+ */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
- return mem && paddr >= mem->start && paddr < mem->end;
+ if (!mem)
+ return false;
+
+ if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
+ /* Pairs with smp_wmb() in swiotlb_find_slots() and
+ * swiotlb_dyn_alloc(), which modify the RCU lists.
+ */
+ smp_rmb();
+ return swiotlb_find_pool(dev, paddr);
+ }
+ return paddr >= mem->defpool.start && paddr < mem->defpool.end;
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
@@ -128,13 +191,22 @@ static inline bool is_swiotlb_force_bounce(struct device *dev)
void swiotlb_init(bool addressing_limited, unsigned int flags);
void __init swiotlb_exit(void);
+void swiotlb_dev_init(struct device *dev);
size_t swiotlb_max_mapping_size(struct device *dev);
+bool is_swiotlb_allocated(void);
bool is_swiotlb_active(struct device *dev);
void __init swiotlb_adjust_size(unsigned long size);
+phys_addr_t default_swiotlb_base(void);
+phys_addr_t default_swiotlb_limit(void);
#else
static inline void swiotlb_init(bool addressing_limited, unsigned int flags)
{
}
+
+static inline void swiotlb_dev_init(struct device *dev)
+{
+}
+
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
return false;
@@ -151,6 +223,11 @@ static inline size_t swiotlb_max_mapping_size(struct device *dev)
return SIZE_MAX;
}
+static inline bool is_swiotlb_allocated(void)
+{
+ return false;
+}
+
static inline bool is_swiotlb_active(struct device *dev)
{
return false;
@@ -159,6 +236,16 @@ static inline bool is_swiotlb_active(struct device *dev)
static inline void swiotlb_adjust_size(unsigned long size)
{
}
+
+static inline phys_addr_t default_swiotlb_base(void)
+{
+ return 0;
+}
+
+static inline phys_addr_t default_swiotlb_limit(void)
+{
+ return 0;
+}
#endif /* CONFIG_SWIOTLB */
extern void swiotlb_print_info(void);
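With CONFIG_SWIOTLB_DYNAMIC the buffer check walks the per-device pool list under RCU instead of comparing against a single start/end range, but callers look exactly as before. A sketch of the usual gate in a DMA sync path, assuming the dma-direct style of use:

        static void example_sync_for_cpu(struct device *dev, phys_addr_t paddr,
                                         size_t size, enum dma_data_direction dir)
        {
                if (is_swiotlb_buffer(dev, paddr))
                        swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
        }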
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index 48fabe36509e..8d8fac1626bd 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -41,6 +41,7 @@ enum {
enum switchtec_gen {
SWITCHTEC_GEN3,
SWITCHTEC_GEN4,
+ SWITCHTEC_GEN5,
};
struct mrpc_regs {
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index c0cb22cd607d..22bc6bc147f8 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -939,6 +939,7 @@ asmlinkage long sys_set_mempolicy_home_node(unsigned long start, unsigned long l
asmlinkage long sys_cachestat(unsigned int fd,
struct cachestat_range __user *cstat_range,
struct cachestat __user *cstat, unsigned int flags);
+asmlinkage long sys_map_shadow_stack(unsigned long addr, unsigned long size, unsigned int flags);
/*
* Architecture-specific system calls
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 59d451f455bf..09d7429d67c0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -159,12 +159,22 @@ struct ctl_node {
struct ctl_table_header *header;
};
-/* struct ctl_table_header is used to maintain dynamic lists of
- struct ctl_table trees. */
+/**
+ * struct ctl_table_header - maintains dynamic lists of struct ctl_table trees
+ * @ctl_table: pointer to the first element in ctl_table array
+ * @ctl_table_size: number of elements pointed to by @ctl_table
+ * @used: The entry will never be touched when equal to 0.
+ * @count: Incremented every time something is added to @inodes and decremented
+ * every time something is removed from @inodes
+ * @nreg: When nreg drops to 0 the ctl_table_header will be unregistered.
+ * @rcu: Delays the freeing of the inode. Introduced with "unfuck proc_sysctl ->d_compare()"
+ *
+ */
struct ctl_table_header {
union {
struct {
struct ctl_table *ctl_table;
+ int ctl_table_size;
int used;
int count;
int nreg;
@@ -205,6 +215,9 @@ struct ctl_path {
const char *procname;
};
+#define register_sysctl(path, table) \
+ register_sysctl_sz(path, table, ARRAY_SIZE(table))
+
#ifdef CONFIG_SYSCTL
void proc_sys_poll_notify(struct ctl_table_poll *poll);
@@ -216,14 +229,16 @@ extern void retire_sysctl_set(struct ctl_table_set *set);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
- const char *path, struct ctl_table *table);
-struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table);
+ const char *path, struct ctl_table *table, size_t table_size);
+struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table,
+ size_t table_size);
void unregister_sysctl_table(struct ctl_table_header * table);
extern int sysctl_init_bases(void);
extern void __register_sysctl_init(const char *path, struct ctl_table *table,
- const char *table_name);
-#define register_sysctl_init(path, table) __register_sysctl_init(path, table, #table)
+ const char *table_name, size_t table_size);
+#define register_sysctl_init(path, table) \
+ __register_sysctl_init(path, table, #table, ARRAY_SIZE(table))
extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
void do_sysctl_args(void);
@@ -252,7 +267,9 @@ static inline struct ctl_table_header *register_sysctl_mount_point(const char *p
return NULL;
}
-static inline struct ctl_table_header *register_sysctl(const char *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_sysctl_sz(const char *path,
+ struct ctl_table *table,
+ size_t table_size)
{
return NULL;
}
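register_sysctl() now expands to register_sysctl_sz() with ARRAY_SIZE(table), so callers that pass a real array keep working while the element count travels alongside the pointer. A sketch under these assumptions (names are illustrative; the empty sentinel entry is still expected at this stage of the transition):

        #include <linux/sysctl.h>

        static int example_value;

        static struct ctl_table example_table[] = {
                {
                        .procname       = "example_value",
                        .data           = &example_value,
                        .maxlen         = sizeof(int),
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec,
                },
                { }     /* sentinel */
        };

        static int __init example_sysctl_init(void)
        {
                /* expands to register_sysctl_sz("kernel", example_table, ARRAY_SIZE(example_table)) */
                return register_sysctl("kernel", example_table) ? 0 : -ENOMEM;
        }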
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index c1ef5fc60a3c..19cb803dd5ec 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -9,7 +9,8 @@
#include <linux/kernel.h>
#include <linux/platform_data/simplefb.h>
-#include <linux/screen_info.h>
+
+struct screen_info;
enum {
M_I17, /* 17-Inch iMac */
diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
index 3a582ec7a2f1..bdca467ebb77 100644
--- a/include/linux/sysrq.h
+++ b/include/linux/sysrq.h
@@ -30,7 +30,7 @@
#define SYSRQ_ENABLE_RTNICE 0x0100
struct sysrq_key_op {
- void (* const handler)(int);
+ void (* const handler)(u8);
const char * const help_msg;
const char * const action_msg;
const int enable_mask;
@@ -43,10 +43,10 @@ struct sysrq_key_op {
* are available -- else NULL's).
*/
-void handle_sysrq(int key);
-void __handle_sysrq(int key, bool check_mask);
-int register_sysrq_key(int key, const struct sysrq_key_op *op);
-int unregister_sysrq_key(int key, const struct sysrq_key_op *op);
+void handle_sysrq(u8 key);
+void __handle_sysrq(u8 key, bool check_mask);
+int register_sysrq_key(u8 key, const struct sysrq_key_op *op);
+int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op);
extern const struct sysrq_key_op *__sysrq_reboot_op;
int sysrq_toggle_support(int enable_mask);
@@ -54,20 +54,20 @@ int sysrq_mask(void);
#else
-static inline void handle_sysrq(int key)
+static inline void handle_sysrq(u8 key)
{
}
-static inline void __handle_sysrq(int key, bool check_mask)
+static inline void __handle_sysrq(u8 key, bool check_mask)
{
}
-static inline int register_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int register_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
-static inline int unregister_sysrq_key(int key, const struct sysrq_key_op *op)
+static inline int unregister_sysrq_key(u8 key, const struct sysrq_key_op *op)
{
return -EINVAL;
}
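Since a SysRq trigger is always a single character, the handler and registration interfaces now take u8. A sketch of a key operation under the new prototype (the key choice and messages are illustrative):

        static void sysrq_handle_example(u8 key)
        {
                pr_info("sysrq: example handler invoked by '%c'\n", key);
        }

        static const struct sysrq_key_op sysrq_example_op = {
                .handler        = sysrq_handle_example,
                .help_msg       = "example(y)",
                .action_msg     = "Example action",
                .enable_mask    = SYSRQ_ENABLE_RTNICE,
        };

        /* register_sysrq_key('y', &sysrq_example_op); the stub returns -EINVAL without CONFIG_MAGIC_SYSRQ */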
diff --git a/include/linux/tca6416_keypad.h b/include/linux/tca6416_keypad.h
index b0d36a9934cc..5cf6f6f82aa7 100644
--- a/include/linux/tca6416_keypad.h
+++ b/include/linux/tca6416_keypad.h
@@ -25,7 +25,6 @@ struct tca6416_keys_platform_data {
unsigned int rep:1; /* enable input subsystem auto repeat */
uint16_t pinmask;
uint16_t invert;
- int irq_is_gpio;
int use_polling; /* use polling if Interrupt is not connected*/
};
#endif
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index b449a46766f5..a5ae4af955ff 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -53,6 +53,20 @@ enum thermal_notify_event {
THERMAL_EVENT_KEEP_ALIVE, /* Request for user space handler to respond */
};
+/**
+ * struct thermal_trip - representation of a point in temperature domain
+ * @temperature: temperature value in milliCelsius
+ * @hysteresis: relative hysteresis in milliCelsius
+ * @type: trip point type
+ * @priv: pointer to driver data associated with this trip
+ */
+struct thermal_trip {
+ int temperature;
+ int hysteresis;
+ enum thermal_trip_type type;
+ void *priv;
+};
+
struct thermal_zone_device_ops {
int (*bind) (struct thermal_zone_device *,
struct thermal_cooling_device *);
@@ -62,34 +76,16 @@ struct thermal_zone_device_ops {
int (*set_trips) (struct thermal_zone_device *, int, int);
int (*change_mode) (struct thermal_zone_device *,
enum thermal_device_mode);
- int (*get_trip_type) (struct thermal_zone_device *, int,
- enum thermal_trip_type *);
- int (*get_trip_temp) (struct thermal_zone_device *, int, int *);
int (*set_trip_temp) (struct thermal_zone_device *, int, int);
- int (*get_trip_hyst) (struct thermal_zone_device *, int, int *);
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, int,
- enum thermal_trend *);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
-/**
- * struct thermal_trip - representation of a point in temperature domain
- * @temperature: temperature value in miliCelsius
- * @hysteresis: relative hysteresis in miliCelsius
- * @type: trip point type
- * @priv: pointer to driver data associated with this trip
- */
-struct thermal_trip {
- int temperature;
- int hysteresis;
- enum thermal_trip_type type;
- void *priv;
-};
-
struct thermal_cooling_device_ops {
int (*get_max_state) (struct thermal_cooling_device *, unsigned long *);
int (*get_cur_state) (struct thermal_cooling_device *, unsigned long *);
@@ -304,16 +300,22 @@ int thermal_acpi_critical_trip_temp(struct acpi_device *adev, int *ret_temp);
#endif
#ifdef CONFIG_THERMAL
-struct thermal_zone_device *thermal_zone_device_register(const char *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
-
-void thermal_zone_device_unregister(struct thermal_zone_device *);
-
-struct thermal_zone_device *
-thermal_zone_device_register_with_trips(const char *, struct thermal_trip *, int, int,
- void *, struct thermal_zone_device_ops *,
- const struct thermal_zone_params *, int, int);
+struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay);
+
+struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp);
+
+void thermal_zone_device_unregister(struct thermal_zone_device *tz);
void *thermal_zone_device_priv(struct thermal_zone_device *tzd);
const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
@@ -354,15 +356,26 @@ int thermal_zone_device_enable(struct thermal_zone_device *tz);
int thermal_zone_device_disable(struct thermal_zone_device *tz);
void thermal_zone_device_critical(struct thermal_zone_device *tz);
#else
-static inline struct thermal_zone_device *thermal_zone_device_register(
- const char *type, int trips, int mask, void *devdata,
- struct thermal_zone_device_ops *ops,
- const struct thermal_zone_params *tzp,
- int passive_delay, int polling_delay)
+static inline struct thermal_zone_device *thermal_zone_device_register_with_trips(
+ const char *type,
+ struct thermal_trip *trips,
+ int num_trips, int mask,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp,
+ int passive_delay, int polling_delay)
{ return ERR_PTR(-ENODEV); }
-static inline void thermal_zone_device_unregister(
- struct thermal_zone_device *tz)
+
+static inline struct thermal_zone_device *thermal_tripless_zone_device_register(
+ const char *type,
+ void *devdata,
+ struct thermal_zone_device_ops *ops,
+ const struct thermal_zone_params *tzp)
+{ return ERR_PTR(-ENODEV); }
+
+static inline void thermal_zone_device_unregister(struct thermal_zone_device *tz)
{ }
+
static inline struct thermal_cooling_device *
thermal_cooling_device_register(const char *type, void *devdata,
const struct thermal_cooling_device_ops *ops)
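The plain thermal_zone_device_register() declaration is dropped from this header; zones now register either with an explicit trip table or, when they have no trips at all, through the new tripless helper. A sketch with illustrative trip values, ops and delays:

        static struct thermal_trip example_trips[] = {
                { .temperature = 75000, .hysteresis = 2000, .type = THERMAL_TRIP_PASSIVE },
                { .temperature = 95000, .hysteresis = 0, .type = THERMAL_TRIP_CRITICAL },
        };

        tz = thermal_zone_device_register_with_trips("example", example_trips,
                                                     ARRAY_SIZE(example_trips),
                                                     0, drvdata, &example_ops,
                                                     NULL, 0, 1000);
        if (IS_ERR(tz))
                return PTR_ERR(tz);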
diff --git a/include/linux/ti_wilink_st.h b/include/linux/ti_wilink_st.h
index 44a7f9169ac6..10642d4844f0 100644
--- a/include/linux/ti_wilink_st.h
+++ b/include/linux/ti_wilink_st.h
@@ -271,7 +271,7 @@ long st_kim_stop(void *);
void st_kim_complete(void *);
void kim_st_list_protocols(struct st_data_s *, void *);
-void st_kim_recv(void *, const unsigned char *, long);
+void st_kim_recv(void *disc_data, const u8 *data, size_t count);
/*
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index c1a0a19d80fb..21ae37e49319 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
- u16 offset;
u16 len;
+ u16 offset;
#else
- u16 len;
u16 offset;
+ u16 len;
#endif
-};
+} __packed;
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -649,7 +649,7 @@ struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
- struct dentry *dir;
+ struct eventfs_file *ef;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
@@ -824,6 +824,7 @@ enum {
FILTER_RDYN_STRING,
FILTER_PTR_STRING,
FILTER_TRACE_FN,
+ FILTER_CPUMASK,
FILTER_COMM,
FILTER_CPU,
FILTER_STACKTRACE,
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 99912445974c..009072792fa3 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -21,6 +21,29 @@ struct file_operations;
#ifdef CONFIG_TRACING
+struct eventfs_file;
+
+struct dentry *eventfs_create_events_dir(const char *name,
+ struct dentry *parent);
+
+struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
+ struct dentry *parent);
+
+struct eventfs_file *eventfs_add_dir(const char *name,
+ struct eventfs_file *ef_parent);
+
+int eventfs_add_file(const char *name, umode_t mode,
+ struct eventfs_file *ef_parent, void *data,
+ const struct file_operations *fops);
+
+int eventfs_add_events_file(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops);
+
+void eventfs_remove(struct eventfs_file *ef);
+
+void eventfs_remove_events_dir(struct dentry *dentry);
+
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
diff --git a/include/linux/tty.h b/include/linux/tty.h
index e8d5d9997aca..f002d0f25db7 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -192,13 +192,14 @@ struct tty_operations;
*/
struct tty_struct {
struct kref kref;
+ int index;
struct device *dev;
struct tty_driver *driver;
+ struct tty_port *port;
const struct tty_operations *ops;
- int index;
- struct ld_semaphore ldisc_sem;
struct tty_ldisc *ldisc;
+ struct ld_semaphore ldisc_sem;
struct mutex atomic_write_lock;
struct mutex legacy_mutex;
@@ -209,6 +210,7 @@ struct tty_struct {
char name[64];
unsigned long flags;
int count;
+ unsigned int receive_room;
struct winsize winsize;
struct {
@@ -219,16 +221,16 @@ struct tty_struct {
} __aligned(sizeof(unsigned long)) flow;
struct {
- spinlock_t lock;
struct pid *pgrp;
struct pid *session;
+ spinlock_t lock;
unsigned char pktstatus;
bool packet;
unsigned long unused[0];
} __aligned(sizeof(unsigned long)) ctrl;
bool hw_stopped;
- unsigned int receive_room;
+ bool closing;
int flow_change;
struct tty_struct *link;
@@ -239,15 +241,13 @@ struct tty_struct {
void *disc_data;
void *driver_data;
spinlock_t files_lock;
+ int write_cnt;
+ unsigned char *write_buf;
+
struct list_head tty_files;
#define N_TTY_BUF_SIZE 4096
-
- int closing;
- unsigned char *write_buf;
- int write_cnt;
struct work_struct SAK_work;
- struct tty_port *port;
} __randomize_layout;
/* Each of a tty's open files has private_data pointing to tty_file_private */
diff --git a/include/linux/tty_buffer.h b/include/linux/tty_buffer.h
index 6ceb2789e6c8..31125e3be3c5 100644
--- a/include/linux/tty_buffer.h
+++ b/include/linux/tty_buffer.h
@@ -12,24 +12,24 @@ struct tty_buffer {
struct tty_buffer *next;
struct llist_node free;
};
- int used;
- int size;
- int commit;
- int lookahead; /* Lazy update on recv, can become less than "read" */
- int read;
+ unsigned int used;
+ unsigned int size;
+ unsigned int commit;
+ unsigned int lookahead; /* Lazy update on recv, can become less than "read" */
+ unsigned int read;
bool flags;
/* Data points here */
- unsigned long data[];
+ u8 data[] __aligned(sizeof(unsigned long));
};
-static inline unsigned char *char_buf_ptr(struct tty_buffer *b, int ofs)
+static inline u8 *char_buf_ptr(struct tty_buffer *b, unsigned int ofs)
{
- return ((unsigned char *)b->data) + ofs;
+ return b->data + ofs;
}
-static inline char *flag_buf_ptr(struct tty_buffer *b, int ofs)
+static inline u8 *flag_buf_ptr(struct tty_buffer *b, unsigned int ofs)
{
- return (char *)char_buf_ptr(b, ofs) + b->size;
+ return char_buf_ptr(b, ofs) + b->size;
}
struct tty_bufhead {
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index e00034118c7b..18beff0cec1a 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -72,8 +72,8 @@ struct serial_struct;
* is closed for the last time freeing up the resources. This is
* actually the second part of shutdown for routines that might sleep.
*
- * @write: ``int ()(struct tty_struct *tty, const unsigned char *buf,
- * int count)``
+ * @write: ``ssize_t ()(struct tty_struct *tty, const unsigned char *buf,
+ * size_t count)``
*
* This routine is called by the kernel to write a series (@count) of
* characters (@buf) to the @tty device. The characters may come from
@@ -356,9 +356,8 @@ struct tty_operations {
void (*close)(struct tty_struct * tty, struct file * filp);
void (*shutdown)(struct tty_struct *tty);
void (*cleanup)(struct tty_struct *tty);
- int (*write)(struct tty_struct * tty,
- const unsigned char *buf, int count);
- int (*put_char)(struct tty_struct *tty, unsigned char ch);
+ ssize_t (*write)(struct tty_struct *tty, const u8 *buf, size_t count);
+ int (*put_char)(struct tty_struct *tty, u8 ch);
void (*flush_chars)(struct tty_struct *tty);
unsigned int (*write_room)(struct tty_struct *tty);
unsigned int (*chars_in_buffer)(struct tty_struct *tty);
diff --git a/include/linux/tty_flip.h b/include/linux/tty_flip.h
index bfaaeee61a05..af4fce98f64e 100644
--- a/include/linux/tty_flip.h
+++ b/include/linux/tty_flip.h
@@ -10,17 +10,59 @@ struct tty_ldisc;
int tty_buffer_set_limit(struct tty_port *port, int limit);
unsigned int tty_buffer_space_avail(struct tty_port *port);
int tty_buffer_request_room(struct tty_port *port, size_t size);
-int tty_insert_flip_string_flags(struct tty_port *port,
- const unsigned char *chars, const char *flags, size_t size);
-int tty_insert_flip_string_fixed_flag(struct tty_port *port,
- const unsigned char *chars, char flag, size_t size);
-int tty_prepare_flip_string(struct tty_port *port, unsigned char **chars,
- size_t size);
+size_t __tty_insert_flip_string_flags(struct tty_port *port, const u8 *chars,
+ const u8 *flags, bool mutable_flags,
+ size_t size);
+size_t tty_prepare_flip_string(struct tty_port *port, u8 **chars, size_t size);
void tty_flip_buffer_push(struct tty_port *port);
-int __tty_insert_flip_char(struct tty_port *port, unsigned char ch, char flag);
-static inline int tty_insert_flip_char(struct tty_port *port,
- unsigned char ch, char flag)
+/**
+ * tty_insert_flip_string_fixed_flag - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flag: flag value for each character
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. All the characters passed are
+ * marked with the supplied flag.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_fixed_flag(struct tty_port *port,
+ const u8 *chars, u8 flag,
+ size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, &flag, false, size);
+}
+
+/**
+ * tty_insert_flip_string_flags - add characters to the tty buffer
+ * @port: tty port
+ * @chars: characters
+ * @flags: flag bytes
+ * @size: size
+ *
+ * Queue a series of bytes to the tty buffering. For each character the flags
+ * array indicates the status of the character.
+ *
+ * Returns: the number added.
+ */
+static inline size_t tty_insert_flip_string_flags(struct tty_port *port,
+ const u8 *chars,
+ const u8 *flags, size_t size)
+{
+ return __tty_insert_flip_string_flags(port, chars, flags, true, size);
+}
+
+/**
+ * tty_insert_flip_char - add one character to the tty buffer
+ * @port: tty port
+ * @ch: character
+ * @flag: flag byte
+ *
+ * Queue a single byte @ch to the tty buffering, with an optional flag.
+ */
+static inline size_t tty_insert_flip_char(struct tty_port *port, u8 ch, u8 flag)
{
struct tty_buffer *tb = port->buf.tail;
int change;
@@ -32,17 +74,17 @@ static inline int tty_insert_flip_char(struct tty_port *port,
*char_buf_ptr(tb, tb->used++) = ch;
return 1;
}
- return __tty_insert_flip_char(port, ch, flag);
+ return __tty_insert_flip_string_flags(port, &ch, &flag, false, 1);
}
-static inline int tty_insert_flip_string(struct tty_port *port,
- const unsigned char *chars, size_t size)
+static inline size_t tty_insert_flip_string(struct tty_port *port,
+ const u8 *chars, size_t size)
{
return tty_insert_flip_string_fixed_flag(port, chars, TTY_NORMAL, size);
}
-int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
- const char *f, int count);
+size_t tty_ldisc_receive_buf(struct tty_ldisc *ld, const u8 *p, const u8 *f,
+ size_t count);
void tty_buffer_lock_exclusive(struct tty_port *port);
void tty_buffer_unlock_exclusive(struct tty_port *port);
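The flip helpers become static inlines over a single __tty_insert_flip_string_flags() and operate on u8 buffers throughout. A sketch of a driver receive path built on them (function and message are illustrative):

        static void example_rx(struct tty_port *port, const u8 *buf, size_t len)
        {
                size_t copied = tty_insert_flip_string(port, buf, len);

                if (copied < len)
                        pr_warn_ratelimited("example: %zu rx bytes dropped\n", len - copied);
                tty_flip_buffer_push(port);
        }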
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 49dc172dedc7..af01e89074b2 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -71,7 +71,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* call to @receive_buf(). Returning an error will prevent the ldisc from
* being attached.
*
- * Can sleep.
+ * Optional. Can sleep.
*
* @close: [TTY] ``void ()(struct tty_struct *tty)``
*
@@ -80,7 +80,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* changed to use a new line discipline. At the point of execution no
* further users will enter the ldisc code for this tty.
*
- * Can sleep.
+ * Optional. Can sleep.
*
* @flush_buffer: [TTY] ``void ()(struct tty_struct *tty)``
*
@@ -88,8 +88,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* input characters it may have queued to be delivered to the user mode
* process. It may be called at any point between open and close.
*
- * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
- * unsigned char *buf, size_t nr)``
+ * Optional.
+ *
+ * @read: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file, u8 *buf,
+ * size_t nr)``
*
* This function is called when the user requests to read from the @tty.
* The line discipline will return whatever characters it has buffered up
@@ -97,10 +99,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* an %EIO error. Multiple read calls may occur in parallel and the ldisc
* must deal with serialization issues.
*
- * Can sleep.
+ * Optional: %EIO unless provided. Can sleep.
*
* @write: [TTY] ``ssize_t ()(struct tty_struct *tty, struct file *file,
- * const unsigned char *buf, size_t nr)``
+ * const u8 *buf, size_t nr)``
*
* This function is called when the user requests to write to the @tty.
* The line discipline will deliver the characters to the low-level tty
@@ -108,7 +110,7 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* characters first. If this function is not defined, the user will
* receive an %EIO error.
*
- * Can sleep.
+ * Optional: %EIO unless provided. Can sleep.
*
* @ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
* unsigned long arg)``
@@ -120,6 +122,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
 * discipline. So a low-level driver can "grab" an ioctl request before
 * the line discipline has a chance to see it.
*
+ * Optional.
+ *
* @compat_ioctl: [TTY] ``int ()(struct tty_struct *tty, unsigned int cmd,
* unsigned long arg)``
*
@@ -130,11 +134,15 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* a pointer to wordsize-sensitive structure belongs here, but most of
* ldiscs will happily leave it %NULL.
*
+ * Optional.
+ *
* @set_termios: [TTY] ``void ()(struct tty_struct *tty, const struct ktermios *old)``
*
 * This function notifies the line discipline that a change has been made
* to the termios structure.
*
+ * Optional.
+ *
* @poll: [TTY] ``int ()(struct tty_struct *tty, struct file *file,
* struct poll_table_struct *wait)``
*
@@ -142,6 +150,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* device. It is solely the responsibility of the line discipline to
* handle poll requests.
*
+ * Optional.
+ *
* @hangup: [TTY] ``void ()(struct tty_struct *tty)``
*
* Called on a hangup. Tells the discipline that it should cease I/O to
@@ -149,10 +159,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* but should wait until any pending driver I/O is completed. No further
* calls into the ldisc code will occur.
*
- * Can sleep.
+ * Optional. Can sleep.
*
- * @receive_buf: [DRV] ``void ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * @receive_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver to send characters
 * received by the hardware to the line discipline for processing. @cp is
@@ -161,6 +171,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* character was received with a parity error, etc. @fp may be %NULL to
* indicate all data received is %TTY_NORMAL.
*
+ * Optional.
+ *
* @write_wakeup: [DRV] ``void ()(struct tty_struct *tty)``
*
* This function is called by the low-level tty driver to signal that line
@@ -170,13 +182,17 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
 * send, please schedule a tasklet or workqueue to do the real data transfer.
* Do not send data in this hook, it may lead to a deadlock.
*
+ * Optional.
+ *
* @dcd_change: [DRV] ``void ()(struct tty_struct *tty, bool active)``
*
* Tells the discipline that the DCD pin has changed its status. Used
* exclusively by the %N_PPS (Pulse-Per-Second) line discipline.
*
- * @receive_buf2: [DRV] ``int ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * Optional.
+ *
+ * @receive_buf2: [DRV] ``ssize_t ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver to send characters
 * received by the hardware to the line discipline for processing. @cp is a
@@ -186,8 +202,10 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* indicate all data received is %TTY_NORMAL. If assigned, prefer this
* function for automatic flow control.
*
- * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty,
- * const unsigned char *cp, const char *fp, int count)``
+ * Optional.
+ *
+ * @lookahead_buf: [DRV] ``void ()(struct tty_struct *tty, const u8 *cp,
+ * const u8 *fp, size_t count)``
*
* This function is called by the low-level tty driver for characters
* not eaten by ->receive_buf() or ->receive_buf2(). It is useful for
@@ -198,6 +216,8 @@ int ldsem_down_write_nested(struct ld_semaphore *sem, int subclass,
* same characters (e.g. by skipping the actions for high-priority
* characters already handled by ->lookahead_buf()).
*
+ * Optional.
+ *
 * @owner: module containing this ldisc (for reference counting)
*
* This structure defines the interface between the tty line discipline
@@ -218,11 +238,10 @@ struct tty_ldisc_ops {
int (*open)(struct tty_struct *tty);
void (*close)(struct tty_struct *tty);
void (*flush_buffer)(struct tty_struct *tty);
- ssize_t (*read)(struct tty_struct *tty, struct file *file,
- unsigned char *buf, size_t nr,
- void **cookie, unsigned long offset);
+ ssize_t (*read)(struct tty_struct *tty, struct file *file, u8 *buf,
+ size_t nr, void **cookie, unsigned long offset);
ssize_t (*write)(struct tty_struct *tty, struct file *file,
- const unsigned char *buf, size_t nr);
+ const u8 *buf, size_t nr);
int (*ioctl)(struct tty_struct *tty, unsigned int cmd,
unsigned long arg);
int (*compat_ioctl)(struct tty_struct *tty, unsigned int cmd,
@@ -235,14 +254,14 @@ struct tty_ldisc_ops {
/*
* The following routines are called from below.
*/
- void (*receive_buf)(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count);
+ void (*receive_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
void (*write_wakeup)(struct tty_struct *tty);
void (*dcd_change)(struct tty_struct *tty, bool active);
- int (*receive_buf2)(struct tty_struct *tty, const unsigned char *cp,
- const char *fp, int count);
- void (*lookahead_buf)(struct tty_struct *tty, const unsigned char *cp,
- const unsigned char *fp, unsigned int count);
+ size_t (*receive_buf2)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
+ void (*lookahead_buf)(struct tty_struct *tty, const u8 *cp,
+ const u8 *fp, size_t count);
struct module *owner;
};
diff --git a/include/linux/tty_port.h b/include/linux/tty_port.h
index edf685a24f7c..6b367eb17979 100644
--- a/include/linux/tty_port.h
+++ b/include/linux/tty_port.h
@@ -39,9 +39,10 @@ struct tty_port_operations {
};
struct tty_port_client_operations {
- int (*receive_buf)(struct tty_port *port, const unsigned char *, const unsigned char *, size_t);
- void (*lookahead_buf)(struct tty_port *port, const unsigned char *cp,
- const unsigned char *fp, unsigned int count);
+ size_t (*receive_buf)(struct tty_port *port, const u8 *cp, const u8 *fp,
+ size_t count);
+ void (*lookahead_buf)(struct tty_port *port, const u8 *cp,
+ const u8 *fp, size_t count);
void (*write_wakeup)(struct tty_port *port);
};
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 25f8e62a30ec..a21074861f91 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -25,7 +25,6 @@
struct usb_device;
struct usb_driver;
-struct wusb_dev;
/*-------------------------------------------------------------------------*/
@@ -425,7 +424,6 @@ struct usb_host_config {
struct usb_host_bos {
struct usb_bos_descriptor *desc;
- /* wireless cap descriptor is handled by wusb */
struct usb_ext_cap_descriptor *ext_cap;
struct usb_ss_cap_descriptor *ss_cap;
struct usb_ssp_cap_descriptor *ssp_cap;
@@ -612,7 +610,6 @@ struct usb3_lpm_parameters {
* WUSB devices are not, until we authorize them from user space.
* FIXME -- complete doc
* @authenticated: Crypto authentication passed
- * @wusb: device is Wireless USB
* @lpm_capable: device supports LPM
* @lpm_devinit_allow: Allow USB3 device initiated LPM, exit latency is in range
* @usb2_hw_lpm_capable: device can perform USB2 hardware LPM
@@ -634,8 +631,6 @@ struct usb3_lpm_parameters {
* @do_remote_wakeup: remote wakeup should be enabled
* @reset_resume: needs reset instead of resume
* @port_is_suspended: the upstream port is suspended (L2 or U3)
- * @wusb_dev: if this is a Wireless USB device, link to the WUSB
- * specific data for the device.
* @slot_id: Slot ID assigned by xHCI
* @removable: Device can be physically removed from this port
* @l1_params: best effor service latency for USB2 L1 LPM state, and L1 timeout.
@@ -696,7 +691,6 @@ struct usb_device {
unsigned have_langid:1;
unsigned authorized:1;
unsigned authenticated:1;
- unsigned wusb:1;
unsigned lpm_capable:1;
unsigned lpm_devinit_allow:1;
unsigned usb2_hw_lpm_capable:1;
@@ -727,7 +721,6 @@ struct usb_device {
unsigned reset_resume:1;
unsigned port_is_suspended:1;
- struct wusb_dev *wusb_dev;
int slot_id;
struct usb2_lpm_parameters l1_params;
struct usb3_lpm_parameters u1_params;
@@ -1742,11 +1735,6 @@ static inline void usb_fill_bulk_urb(struct urb *urb,
* encoding of the endpoint interval, and express polling intervals in
* microframes (eight per millisecond) rather than in frames (one per
* millisecond).
- *
- * Wireless USB also uses the logarithmic encoding, but specifies it in units of
- * 128us instead of 125us. For Wireless USB devices, the interval is passed
- * through to the host controller, rather than being translated into microframe
- * units.
*/
static inline void usb_fill_int_urb(struct urb *urb,
struct usb_device *dev,
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h
index 969e7dba6358..c93b410b314a 100644
--- a/include/linux/usb/ch9.h
+++ b/include/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB host or as a USB device. That means the host and
* device side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index ee38835ed77c..0b4f2d5faa08 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -63,6 +63,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_IMX_IS_HSIC BIT(14)
#define CI_HDRC_PMQOS BIT(15)
#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
+#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 07531c4f4350..6014340ba980 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -450,29 +450,6 @@ static inline struct usb_composite_driver *to_cdriver(
*
* One of these devices is allocated and initialized before the
* associated device driver's bind() is called.
- *
- * OPEN ISSUE: it appears that some WUSB devices will need to be
- * built by combining a normal (wired) gadget with a wireless one.
- * This revision of the gadget framework should probably try to make
- * sure doing that won't hurt too much.
- *
- * One notion for how to handle Wireless USB devices involves:
- *
- * (a) a second gadget here, discovery mechanism TBD, but likely
- * needing separate "register/unregister WUSB gadget" calls;
- * (b) updates to usb_gadget to include flags "is it wireless",
- * "is it wired", plus (presumably in a wrapper structure)
- * bandgroup and PHY info;
- * (c) presumably a wireless_ep wrapping a usb_ep, and reporting
- * wireless-specific parameters like maxburst and maxsequence;
- * (d) configurations that are specific to wireless links;
- * (e) function drivers that understand wireless configs and will
- * support wireless for (additional) function instances;
- * (f) a function to support association setup (like CBAF), not
- * necessarily requiring a wireless adapter;
- * (g) composite device setup that can create one or more wireless
- * configs, including appropriate association setup support;
- * (h) more, TBD.
*/
struct usb_composite_dev {
struct usb_gadget *gadget;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 4e9623e8492b..61d4f0b793dc 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -154,7 +154,6 @@ struct usb_hcd {
/* The next flag is a stopgap, to be removed when all the HCDs
* support the new root-hub polling mechanism. */
unsigned uses_new_polling:1;
- unsigned wireless:1; /* Wireless USB HCD */
unsigned has_tt:1; /* Integrated TT in root hub */
unsigned amd_resume_bug:1; /* AMD remote wakeup quirk */
unsigned can_do_streams:1; /* HC supports streams */
@@ -249,7 +248,6 @@ struct hc_driver {
#define HCD_SHARED 0x0004 /* Two (or more) usb_hcds share HW */
#define HCD_USB11 0x0010 /* USB 1.1 */
#define HCD_USB2 0x0020 /* USB 2.0 */
-#define HCD_USB25 0x0030 /* Wireless USB 1.0 (USB 2.5)*/
#define HCD_USB3 0x0040 /* USB 3.0 */
#define HCD_USB31 0x0050 /* USB 3.1 */
#define HCD_USB32 0x0060 /* USB 3.2 */
diff --git a/include/linux/usb/phy.h b/include/linux/usb/phy.h
index e4de6bc1f69b..b513749582d7 100644
--- a/include/linux/usb/phy.h
+++ b/include/linux/usb/phy.h
@@ -144,6 +144,10 @@ struct usb_phy {
*/
int (*set_wakeup)(struct usb_phy *x, bool enabled);
+ /* notify phy port status change */
+ int (*notify_port_status)(struct usb_phy *x, int port,
+ u16 portstatus, u16 portchange);
+
/* notify phy connect status change */
int (*notify_connect)(struct usb_phy *x,
enum usb_device_speed speed);
@@ -317,6 +321,15 @@ usb_phy_set_wakeup(struct usb_phy *x, bool enabled)
}
static inline int
+usb_phy_notify_port_status(struct usb_phy *x, int port, u16 portstatus, u16 portchange)
+{
+ if (x && x->notify_port_status)
+ return x->notify_port_status(x, port, portstatus, portchange);
+ else
+ return 0;
+}
+
+static inline int
usb_phy_notify_connect(struct usb_phy *x, enum usb_device_speed speed)
{
if (x && x->notify_connect)
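A minimal usage sketch for the new hook (not part of this series): a host-controller driver that owns a struct usb_phy can forward root-hub port events through usb_phy_notify_port_status(), which is a no-op when the PHY does not implement the callback. The surrounding function name is illustrative.

#include <linux/usb/phy.h>

/* Illustrative HCD helper: forward a root-hub port event to the PHY. */
static void example_report_port_event(struct usb_phy *phy, int port,
				      u16 portstatus, u16 portchange)
{
	int ret;

	ret = usb_phy_notify_port_status(phy, port, portstatus, portchange);
	if (ret)
		pr_warn("PHY rejected port %d status update: %d\n", port, ret);
}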
diff --git a/include/linux/usb/tcpci.h b/include/linux/usb/tcpci.h
index 85e95a3251d3..83376473ac76 100644
--- a/include/linux/usb/tcpci.h
+++ b/include/linux/usb/tcpci.h
@@ -103,6 +103,7 @@
#define TCPC_POWER_STATUS_SINKING_VBUS BIT(0)
#define TCPC_FAULT_STATUS 0x1f
+#define TCPC_FAULT_STATUS_ALL_REG_RST_TO_DEFAULT BIT(7)
#define TCPC_ALERT_EXTENDED 0x21
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 350d49012659..28aeef8f9e7b 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -67,7 +67,7 @@ struct typec_altmode_ops {
int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
int typec_altmode_exit(struct typec_altmode *altmode);
-void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
const u32 header, const u32 *vdo, int count);
int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
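Since typec_altmode_attention() now returns an int, callers are expected to check the result. A hedged sketch (example_handle_attention() is illustrative, not from this series):

#include <linux/device.h>
#include <linux/usb/typec_altmode.h>

static void example_handle_attention(struct typec_altmode *alt, u32 vdo)
{
	int ret = typec_altmode_attention(alt, vdo);

	/* The call can now fail, e.g. when no partner altmode is registered. */
	if (ret)
		dev_dbg(&alt->dev, "Attention VDM not delivered: %d\n", ret);
}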
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index ac7b0c96d351..ac8c6854097c 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -46,6 +46,7 @@ enum mfill_atomic_mode {
MFILL_ATOMIC_COPY,
MFILL_ATOMIC_ZEROPAGE,
MFILL_ATOMIC_CONTINUE,
+ MFILL_ATOMIC_POISON,
NR_MFILL_ATOMIC_MODES,
};
@@ -83,6 +84,9 @@ extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long len, atomic_t *mmap_changing,
uffd_flags_t flags);
+extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
+ unsigned long len, atomic_t *mmap_changing,
+ uffd_flags_t flags);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
unsigned long start, unsigned long len,
bool enable_wp, atomic_t *mmap_changing);
diff --git a/include/linux/vdpa.h b/include/linux/vdpa.h
index db1b0eaef4eb..0e652026b776 100644
--- a/include/linux/vdpa.h
+++ b/include/linux/vdpa.h
@@ -208,6 +208,9 @@ struct vdpa_map_file {
* @vdev: vdpa device
* Returns the virtio features supported by the
* device
+ * @get_backend_features: Get parent-specific backend features (optional)
+ * Returns the vdpa features supported by the
+ * device.
* @set_driver_features: Set virtio features supported by the driver
* @vdev: vdpa device
* @features: features supported by the driver
@@ -358,6 +361,7 @@ struct vdpa_config_ops {
u32 (*get_vq_align)(struct vdpa_device *vdev);
u32 (*get_vq_group)(struct vdpa_device *vdev, u16 idx);
u64 (*get_device_features)(struct vdpa_device *vdev);
+ u64 (*get_backend_features)(const struct vdpa_device *vdev);
int (*set_driver_features)(struct vdpa_device *vdev, u64 features);
u64 (*get_driver_features)(struct vdpa_device *vdev);
void (*set_config_cb)(struct vdpa_device *vdev,
diff --git a/include/linux/vfio.h b/include/linux/vfio.h
index 2c137ea94a3e..454e9295970c 100644
--- a/include/linux/vfio.h
+++ b/include/linux/vfio.h
@@ -13,6 +13,7 @@
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/poll.h>
+#include <linux/cdev.h>
#include <uapi/linux/vfio.h>
#include <linux/iova_bitmap.h>
@@ -42,7 +43,11 @@ struct vfio_device {
*/
const struct vfio_migration_ops *mig_ops;
const struct vfio_log_ops *log_ops;
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct vfio_group *group;
+ struct list_head group_next;
+ struct list_head iommu_entry;
+#endif
struct vfio_device_set *dev_set;
struct list_head dev_set_list;
unsigned int migration_flags;
@@ -51,17 +56,19 @@ struct vfio_device {
/* Members below here are private, not for driver use */
unsigned int index;
struct device device; /* device.kref covers object life cycle */
+#if IS_ENABLED(CONFIG_VFIO_DEVICE_CDEV)
+ struct cdev cdev;
+#endif
refcount_t refcount; /* user count on registered device*/
unsigned int open_count;
struct completion comp;
- struct list_head group_next;
- struct list_head iommu_entry;
struct iommufd_access *iommufd_access;
void (*put_kvm)(struct kvm *kvm);
#if IS_ENABLED(CONFIG_IOMMUFD)
struct iommufd_device *iommufd_device;
- bool iommufd_attached;
+ u8 iommufd_attached:1;
#endif
+ u8 cdev_opened:1;
};
/**
@@ -73,7 +80,9 @@ struct vfio_device {
* @bind_iommufd: Called when binding the device to an iommufd
* @unbind_iommufd: Opposite of bind_iommufd
* @attach_ioas: Called when attaching device to an IOAS/HWPT managed by the
- * bound iommufd. Undo in unbind_iommufd.
+ * bound iommufd. Undo in unbind_iommufd if @detach_ioas is not
+ * called.
+ * @detach_ioas: Opposite of attach_ioas
* @open_device: Called when the first file descriptor is opened for this device
* @close_device: Opposite of open_device
* @read: Perform read(2) on device file descriptor
@@ -97,6 +106,7 @@ struct vfio_device_ops {
struct iommufd_ctx *ictx, u32 *out_device_id);
void (*unbind_iommufd)(struct vfio_device *vdev);
int (*attach_ioas)(struct vfio_device *vdev, u32 *pt_id);
+ void (*detach_ioas)(struct vfio_device *vdev);
int (*open_device)(struct vfio_device *vdev);
void (*close_device)(struct vfio_device *vdev);
ssize_t (*read)(struct vfio_device *vdev, char __user *buf,
@@ -114,15 +124,31 @@ struct vfio_device_ops {
};
#if IS_ENABLED(CONFIG_IOMMUFD)
+struct iommufd_ctx *vfio_iommufd_device_ictx(struct vfio_device *vdev);
+int vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx);
int vfio_iommufd_physical_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_physical_unbind(struct vfio_device *vdev);
int vfio_iommufd_physical_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
+void vfio_iommufd_physical_detach_ioas(struct vfio_device *vdev);
int vfio_iommufd_emulated_bind(struct vfio_device *vdev,
struct iommufd_ctx *ictx, u32 *out_device_id);
void vfio_iommufd_emulated_unbind(struct vfio_device *vdev);
int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
+void vfio_iommufd_emulated_detach_ioas(struct vfio_device *vdev);
#else
+static inline struct iommufd_ctx *
+vfio_iommufd_device_ictx(struct vfio_device *vdev)
+{
+ return NULL;
+}
+
+static inline int
+vfio_iommufd_get_dev_id(struct vfio_device *vdev, struct iommufd_ctx *ictx)
+{
+ return VFIO_PCI_DEVID_NOT_OWNED;
+}
+
#define vfio_iommufd_physical_bind \
((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
u32 *out_device_id)) NULL)
@@ -130,6 +156,8 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_physical_attach_ioas \
((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
+#define vfio_iommufd_physical_detach_ioas \
+ ((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_emulated_bind \
((int (*)(struct vfio_device *vdev, struct iommufd_ctx *ictx, \
u32 *out_device_id)) NULL)
@@ -137,8 +165,15 @@ int vfio_iommufd_emulated_attach_ioas(struct vfio_device *vdev, u32 *pt_id);
((void (*)(struct vfio_device *vdev)) NULL)
#define vfio_iommufd_emulated_attach_ioas \
((int (*)(struct vfio_device *vdev, u32 *pt_id)) NULL)
+#define vfio_iommufd_emulated_detach_ioas \
+ ((void (*)(struct vfio_device *vdev)) NULL)
#endif
+static inline bool vfio_device_cdev_opened(struct vfio_device *device)
+{
+ return device->cdev_opened;
+}
+
/**
* struct vfio_migration_ops - VFIO bus device driver migration callbacks
*
@@ -239,20 +274,44 @@ void vfio_unregister_group_dev(struct vfio_device *device);
int vfio_assign_device_set(struct vfio_device *device, void *set_id);
unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set);
+struct vfio_device *
+vfio_find_device_in_devset(struct vfio_device_set *dev_set,
+ struct device *dev);
int vfio_mig_get_next_state(struct vfio_device *device,
enum vfio_device_mig_state cur_fsm,
enum vfio_device_mig_state new_fsm,
enum vfio_device_mig_state *next_fsm);
+void vfio_combine_iova_ranges(struct rb_root_cached *root, u32 cur_nodes,
+ u32 req_nodes);
+
/*
* External user API
*/
+#if IS_ENABLED(CONFIG_VFIO_GROUP)
struct iommu_group *vfio_file_iommu_group(struct file *file);
bool vfio_file_is_group(struct file *file);
+bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
+#else
+static inline struct iommu_group *vfio_file_iommu_group(struct file *file)
+{
+ return NULL;
+}
+
+static inline bool vfio_file_is_group(struct file *file)
+{
+ return false;
+}
+
+static inline bool vfio_file_has_dev(struct file *file, struct vfio_device *device)
+{
+ return false;
+}
+#endif
+bool vfio_file_is_valid(struct file *file);
bool vfio_file_enforced_coherent(struct file *file);
void vfio_file_set_kvm(struct file *file, struct kvm *kvm);
-bool vfio_file_has_dev(struct file *file, struct vfio_device *device);
#define VFIO_PIN_PAGES_MAX_ENTRIES (PAGE_SIZE/sizeof(unsigned long))
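A hedged sketch of how a driver wires up the new @detach_ioas callback using the physical helpers declared above; when CONFIG_IOMMUFD is disabled the helpers compile to NULL stubs. A real driver would also fill in open_device/close_device and the I/O callbacks; the ops name is illustrative.

#include <linux/vfio.h>

static const struct vfio_device_ops example_vfio_ops = {
	.name		= "example-vfio",
	.bind_iommufd	= vfio_iommufd_physical_bind,
	.unbind_iommufd	= vfio_iommufd_physical_unbind,
	.attach_ioas	= vfio_iommufd_physical_attach_ioas,
	.detach_ioas	= vfio_iommufd_physical_detach_ioas,
};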
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h
index b4b9137f9792..97129a1bbb7d 100644
--- a/include/linux/vgaarb.h
+++ b/include/linux/vgaarb.h
@@ -1,3 +1,5 @@
+/* SPDX-License-Identifier: MIT */
+
/*
* The VGA arbiter manages VGA space routing and VGA resource decode to
* allow multiple VGA devices to be used in a system in a safe way.
@@ -5,27 +7,6 @@
* (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
* (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
* (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS
- * IN THE SOFTWARE.
- *
*/
#ifndef LINUX_VGA_H
@@ -96,7 +77,7 @@ static inline int vga_client_register(struct pci_dev *pdev,
static inline int vga_get_interruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
- return vga_get(pdev, rsrc, 1);
+ return vga_get(pdev, rsrc, 1);
}
/**
@@ -111,7 +92,7 @@ static inline int vga_get_interruptible(struct pci_dev *pdev,
static inline int vga_get_uninterruptible(struct pci_dev *pdev,
unsigned int rsrc)
{
- return vga_get(pdev, rsrc, 0);
+ return vga_get(pdev, rsrc, 0);
}
static inline void vga_client_unregister(struct pci_dev *pdev)
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index de6041deee37..4cc614a38376 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -9,6 +9,7 @@
#include <linux/device.h>
#include <linux/mod_devicetable.h>
#include <linux/gfp.h>
+#include <linux/dma-mapping.h>
/**
* struct virtqueue - a queue to register buffers for sending or receiving.
@@ -61,6 +62,8 @@ int virtqueue_add_sgs(struct virtqueue *vq,
void *data,
gfp_t gfp);
+struct device *virtqueue_dma_dev(struct virtqueue *vq);
+
bool virtqueue_kick(struct virtqueue *vq);
bool virtqueue_kick_prepare(struct virtqueue *vq);
@@ -78,6 +81,8 @@ bool virtqueue_enable_cb(struct virtqueue *vq);
unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq);
+int virtqueue_set_dma_premapped(struct virtqueue *_vq);
+
bool virtqueue_poll(struct virtqueue *vq, unsigned);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);
@@ -95,6 +100,8 @@ dma_addr_t virtqueue_get_used_addr(const struct virtqueue *vq);
int virtqueue_resize(struct virtqueue *vq, u32 num,
void (*recycle)(struct virtqueue *vq, void *buf));
+int virtqueue_reset(struct virtqueue *vq,
+ void (*recycle)(struct virtqueue *vq, void *buf));
/**
* struct virtio_device - representation of a device using virtio
@@ -184,10 +191,8 @@ struct virtio_driver {
void (*scan)(struct virtio_device *dev);
void (*remove)(struct virtio_device *dev);
void (*config_changed)(struct virtio_device *dev);
-#ifdef CONFIG_PM
int (*freeze)(struct virtio_device *dev);
int (*restore)(struct virtio_device *dev);
-#endif
};
static inline struct virtio_driver *drv_to_virtio(struct device_driver *drv)
@@ -206,4 +211,19 @@ void unregister_virtio_driver(struct virtio_driver *drv);
#define module_virtio_driver(__virtio_driver) \
module_driver(__virtio_driver, register_virtio_driver, \
unregister_virtio_driver)
+
+dma_addr_t virtqueue_dma_map_single_attrs(struct virtqueue *_vq, void *ptr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs);
+void virtqueue_dma_unmap_single_attrs(struct virtqueue *_vq, dma_addr_t addr,
+ size_t size, enum dma_data_direction dir,
+ unsigned long attrs);
+int virtqueue_dma_mapping_error(struct virtqueue *_vq, dma_addr_t addr);
+
+bool virtqueue_dma_need_sync(struct virtqueue *_vq, dma_addr_t addr);
+void virtqueue_dma_sync_single_range_for_cpu(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
+void virtqueue_dma_sync_single_range_for_device(struct virtqueue *_vq, dma_addr_t addr,
+ unsigned long offset, size_t size,
+ enum dma_data_direction dir);
#endif /* _LINUX_VIRTIO_H */
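A hedged sketch of the new premapped-DMA flow: the driver opts the virtqueue out of core DMA mapping and maps buffers itself with the virtqueue helpers. Error handling is trimmed and the function name is illustrative.

#include <linux/virtio.h>
#include <linux/dma-mapping.h>

static dma_addr_t example_premap_buffer(struct virtqueue *vq, void *buf,
					size_t len)
{
	dma_addr_t addr;

	/* Must be done before any buffer is added to the ring. */
	if (virtqueue_set_dma_premapped(vq))
		return DMA_MAPPING_ERROR;

	addr = virtqueue_dma_map_single_attrs(vq, buf, len, DMA_TO_DEVICE, 0);
	if (virtqueue_dma_mapping_error(vq, addr))
		return DMA_MAPPING_ERROR;

	return addr;
}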
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 683efe29fa69..1c1d06804d45 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -125,6 +125,17 @@ struct rcu_work {
struct workqueue_struct *wq;
};
+enum wq_affn_scope {
+ WQ_AFFN_DFL, /* use system default */
+ WQ_AFFN_CPU, /* one pod per CPU */
+ WQ_AFFN_SMT, /* one pod per SMT */
+ WQ_AFFN_CACHE, /* one pod per LLC */
+ WQ_AFFN_NUMA, /* one pod per NUMA node */
+ WQ_AFFN_SYSTEM, /* one pod across the whole system */
+
+ WQ_AFFN_NR_TYPES,
+};
+
/**
* struct workqueue_attrs - A struct for workqueue attributes.
*
@@ -138,17 +149,58 @@ struct workqueue_attrs {
/**
* @cpumask: allowed CPUs
+ *
+ * Work items in this workqueue are affine to these CPUs and not allowed
+ * to execute on other CPUs. A pool serving a workqueue must have the
+ * same @cpumask.
*/
cpumask_var_t cpumask;
/**
- * @no_numa: disable NUMA affinity
+ * @__pod_cpumask: internal attribute used to create per-pod pools
+ *
+ * Internal use only.
+ *
+ * Per-pod unbound worker pools are used to improve locality. Always a
+ * subset of ->cpumask. A workqueue can be associated with multiple
+ * worker pools with disjoint @__pod_cpumask's. Whether the enforcement
+ * of a pool's @__pod_cpumask is strict depends on @affn_strict.
+ */
+ cpumask_var_t __pod_cpumask;
+
+ /**
+ * @affn_strict: affinity scope is strict
+ *
+ * If clear, workqueue will make a best-effort attempt at starting the
+ * worker inside @__pod_cpumask but the scheduler is free to migrate it
+ * outside.
*
- * Unlike other fields, ``no_numa`` isn't a property of a worker_pool. It
- * only modifies how :c:func:`apply_workqueue_attrs` select pools and thus
- * doesn't participate in pool hash calculations or equality comparisons.
+ * If set, workers are only allowed to run inside @__pod_cpumask.
+ */
+ bool affn_strict;
+
+ /*
+ * The fields below aren't properties of a worker_pool. They only modify how
+ * :c:func:`apply_workqueue_attrs` selects pools and thus don't
+ * participate in pool hash calculations or equality comparisons.
+ */
+
+ /**
+ * @affn_scope: unbound CPU affinity scope
+ *
+ * CPU pods are used to improve execution locality of unbound work
+ * items. There are multiple pod types, one for each wq_affn_scope, and
+ * every CPU in the system belongs to one pod in every pod type. CPUs
+ * that belong to the same pod share the worker pool. For example,
+ * selecting %WQ_AFFN_NUMA makes the workqueue use a separate worker
+ * pool for each NUMA node.
+ */
+ enum wq_affn_scope affn_scope;
+
+ /**
+ * @ordered: work items must be executed one by one in queueing order
*/
- bool no_numa;
+ bool ordered;
};
static inline struct delayed_work *to_delayed_work(struct work_struct *work)
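A hedged sketch of how the new fields are meant to be used: request per-LLC worker pools for an unbound workqueue. It assumes the attrs helpers declared elsewhere in this header (alloc_workqueue_attrs(), apply_workqueue_attrs(), free_workqueue_attrs()), which are kernel-internal interfaces; the workqueue name is illustrative.

#include <linux/printk.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_cache_scoped_wq(void)
{
	struct workqueue_struct *wq;
	struct workqueue_attrs *attrs;

	wq = alloc_workqueue("example_wq", WQ_UNBOUND, 0);
	if (!wq)
		return NULL;

	attrs = alloc_workqueue_attrs();
	if (attrs) {
		attrs->affn_scope = WQ_AFFN_CACHE;	/* one pool per LLC */
		attrs->affn_strict = false;		/* placement is best-effort */
		if (apply_workqueue_attrs(wq, attrs))
			pr_warn("example_wq: keeping default affinity scope\n");
		free_workqueue_attrs(attrs);
	}

	return wq;
}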
@@ -343,14 +395,10 @@ enum {
__WQ_ORDERED_EXPLICIT = 1 << 19, /* internal: alloc_ordered_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
- WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
+ WQ_UNBOUND_MAX_ACTIVE = WQ_MAX_ACTIVE,
WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2,
};
-/* unbound wq's aren't per-cpu, scale max_active according to #cpus */
-#define WQ_UNBOUND_MAX_ACTIVE \
- max_t(int, WQ_MAX_ACTIVE, num_possible_cpus() * WQ_MAX_UNBOUND_PER_CPU)
-
/*
* System-wide workqueues which are always present.
*
@@ -391,7 +439,7 @@ extern struct workqueue_struct *system_freezable_power_efficient_wq;
* alloc_workqueue - allocate a workqueue
* @fmt: printf format for the name of the workqueue
* @flags: WQ_* flags
- * @max_active: max in-flight work items, 0 for default
+ * @max_active: max in-flight work items per CPU, 0 for default
* remaining args: args for @fmt
*
* Allocate a workqueue with the specified parameters. For detailed
@@ -569,6 +617,7 @@ static inline bool schedule_work(struct work_struct *work)
/*
* Detect attempt to flush system-wide workqueues at compile time when possible.
+ * Warn on attempts to flush system-wide workqueues at runtime.
*
* See https://lkml.kernel.org/r/49925af7-78a8-a3dd-bce6-cfc02e1a9236@I-love.SAKURA.ne.jp
* for reasons and steps for converting system-wide workqueues into local workqueues.
@@ -576,52 +625,13 @@ static inline bool schedule_work(struct work_struct *work)
extern void __warn_flushing_systemwide_wq(void)
__compiletime_warning("Please avoid flushing system-wide workqueues.");
-/**
- * flush_scheduled_work - ensure that any scheduled work has run to completion.
- *
- * Forces execution of the kernel-global workqueue and blocks until its
- * completion.
- *
- * It's very easy to get into trouble if you don't take great care.
- * Either of the following situations will lead to deadlock:
- *
- * One of the work items currently on the workqueue needs to acquire
- * a lock held by your code or its caller.
- *
- * Your code is running in the context of a work routine.
- *
- * They will be detected by lockdep when they occur, but the first might not
- * occur very often. It depends on what work items are on the workqueue and
- * what locks they need, which you have no control over.
- *
- * In most situations flushing the entire workqueue is overkill; you merely
- * need to know that a particular work item isn't queued and isn't running.
- * In such cases you should use cancel_delayed_work_sync() or
- * cancel_work_sync() instead.
- *
- * Please stop calling this function! A conversion to stop flushing system-wide
- * workqueues is in progress. This function will be removed after all in-tree
- * users stopped calling this function.
- */
-/*
- * The background of commit 771c035372a036f8 ("deprecate the
- * '__deprecated' attribute warnings entirely and for good") is that,
- * since Linus builds all modules between every single pull he does,
- * the standard kernel build needs to be _clean_ in order to be able to
- * notice when new problems happen. Therefore, don't emit warning while
- * there are in-tree users.
- */
+/* Please stop using this function; it will be removed in the near future. */
#define flush_scheduled_work() \
({ \
- if (0) \
- __warn_flushing_systemwide_wq(); \
+ __warn_flushing_systemwide_wq(); \
__flush_workqueue(system_wq); \
})
-/*
- * Although there is no longer in-tree caller, for now just emit warning
- * in order to give out-of-tree callers time to update.
- */
#define flush_workqueue(wq) \
({ \
struct workqueue_struct *_wq = (wq); \
@@ -714,5 +724,6 @@ int workqueue_offline_cpu(unsigned int cpu);
void __init workqueue_init_early(void);
void __init workqueue_init(void);
+void __init workqueue_init_topology(void);
#endif
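For reference, a hedged sketch of the conversion the removed comment block pointed at: queue and flush work on a driver-private workqueue instead of system_wq, so only the driver's own work items are waited on. All names are illustrative.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_work_fn(struct work_struct *work)
{
	/* driver-specific processing */
}

static int example_init(void)
{
	example_wq = alloc_workqueue("example", 0, 0);
	if (!example_wq)
		return -ENOMEM;
	INIT_WORK(&example_work, example_work_fn);
	queue_work(example_wq, &example_work);
	return 0;
}

static void example_exit(void)
{
	flush_workqueue(example_wq);	/* replaces flush_scheduled_work() */
	destroy_workqueue(example_wq);
}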
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 741703b45f61..cb571dfcf4b1 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -856,6 +856,9 @@ static inline int __must_check xa_insert_irq(struct xarray *xa,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be called on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -886,6 +889,9 @@ static inline __must_check int xa_alloc(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be called on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -916,6 +922,9 @@ static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id,
* stores the index into the @id pointer, then stores the entry at
* that index. A concurrent lookup will not see an uninitialised @id.
*
+ * Must only be called on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 on success, -ENOMEM if memory could not be allocated or
@@ -949,6 +958,9 @@ static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be called on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock. May sleep if
* the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -983,6 +995,9 @@ static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be called on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Any context. Takes and releases the xa_lock while
* disabling softirqs. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
@@ -1017,6 +1032,9 @@ static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry,
* The search for an empty entry will start at @next and will wrap
* around if necessary.
*
+ * Must only be called on an xarray initialized with the XA_FLAGS_ALLOC flag
+ * set in xa_init_flags().
+ *
* Context: Process context. Takes and releases the xa_lock while
* disabling interrupts. May sleep if the @gfp flags permit.
* Return: 0 if the allocation succeeded without wrapping. 1 if the
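A minimal sketch of the requirement the new paragraphs spell out: the allocating API only works on an XArray initialized with XA_FLAGS_ALLOC. The surrounding function is illustrative.

#include <linux/xarray.h>

static int example_store(struct xarray *xa, void *item, u32 *id)
{
	/* Either initialize dynamically with the ALLOC flag ... */
	xa_init_flags(xa, XA_FLAGS_ALLOC);

	/* ... or declare statically: DEFINE_XARRAY_ALLOC(example_xa); */
	return xa_alloc(xa, id, item, xa_limit_32b, GFP_KERNEL);
}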
diff --git a/include/linux/zswap.h b/include/linux/zswap.h
new file mode 100644
index 000000000000..2a60ce39cfde
--- /dev/null
+++ b/include/linux/zswap.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ZSWAP_H
+#define _LINUX_ZSWAP_H
+
+#include <linux/types.h>
+#include <linux/mm_types.h>
+
+extern u64 zswap_pool_total_size;
+extern atomic_t zswap_stored_pages;
+
+#ifdef CONFIG_ZSWAP
+
+bool zswap_store(struct folio *folio);
+bool zswap_load(struct folio *folio);
+void zswap_invalidate(int type, pgoff_t offset);
+void zswap_swapon(int type);
+void zswap_swapoff(int type);
+
+#else
+
+static inline bool zswap_store(struct folio *folio)
+{
+ return false;
+}
+
+static inline bool zswap_load(struct folio *folio)
+{
+ return false;
+}
+
+static inline void zswap_invalidate(int type, pgoff_t offset) {}
+static inline void zswap_swapon(int type) {}
+static inline void zswap_swapoff(int type) {}
+
+#endif
+
+#endif /* _LINUX_ZSWAP_H */
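An illustrative (not authoritative) sketch of the intended call pattern: the swap writeback path tries zswap first and falls back to regular swap I/O when zswap_store() declines the folio.

#include <linux/zswap.h>

static bool example_try_zswap(struct folio *folio)
{
	if (zswap_store(folio))
		return true;	/* compressed copy kept in the zswap pool */

	return false;		/* caller submits normal swap writeback */
}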
diff --git a/include/media/cec.h b/include/media/cec.h
index abee41ae02d0..9c007f83569a 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -113,22 +113,25 @@ struct cec_fh {
#define CEC_FREE_TIME_TO_USEC(ft) ((ft) * 2400)
struct cec_adap_ops {
- /* Low-level callbacks */
+ /* Low-level callbacks, called with adap->lock held */
int (*adap_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_all_enable)(struct cec_adapter *adap, bool enable);
int (*adap_monitor_pin_enable)(struct cec_adapter *adap, bool enable);
int (*adap_log_addr)(struct cec_adapter *adap, u8 logical_addr);
- void (*adap_configured)(struct cec_adapter *adap, bool configured);
+ void (*adap_unconfigured)(struct cec_adapter *adap);
int (*adap_transmit)(struct cec_adapter *adap, u8 attempts,
u32 signal_free_time, struct cec_msg *msg);
+ void (*adap_nb_transmit_canceled)(struct cec_adapter *adap,
+ const struct cec_msg *msg);
void (*adap_status)(struct cec_adapter *adap, struct seq_file *file);
void (*adap_free)(struct cec_adapter *adap);
- /* Error injection callbacks */
+ /* Error injection callbacks, called without adap->lock held */
int (*error_inj_show)(struct cec_adapter *adap, struct seq_file *sf);
bool (*error_inj_parse_line)(struct cec_adapter *adap, char *line);
- /* High-level CEC message callback */
+ /* High-level CEC message callback, called without adap->lock held */
+ void (*configured)(struct cec_adapter *adap);
int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index d03e5c54347a..6cce1f09c721 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -72,7 +72,7 @@ struct vpif_capture_config {
int i2c_adapter_id;
const char *card_name;
- struct v4l2_async_subdev *asd[VPIF_CAPTURE_MAX_CHANNELS];
+ struct v4l2_async_connection *asd[VPIF_CAPTURE_MAX_CHANNELS];
int asd_sizes[VPIF_CAPTURE_MAX_CHANNELS];
};
#endif /* _VPIF_TYPES_H */
diff --git a/include/media/i2c/ds90ub9xx.h b/include/media/i2c/ds90ub9xx.h
new file mode 100644
index 000000000000..0245198469ec
--- /dev/null
+++ b/include/media/i2c/ds90ub9xx.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __MEDIA_I2C_DS90UB9XX_H__
+#define __MEDIA_I2C_DS90UB9XX_H__
+
+#include <linux/types.h>
+
+struct i2c_atr;
+
+/**
+ * struct ds90ub9xx_platform_data - platform data for FPD-Link Serializers.
+ * @port: Deserializer RX port for this Serializer
+ * @atr: I2C ATR
+ * @bc_rate: back-channel clock rate
+ */
+struct ds90ub9xx_platform_data {
+ u32 port;
+ struct i2c_atr *atr;
+ unsigned long bc_rate;
+};
+
+#endif /* __MEDIA_I2C_DS90UB9XX_H__ */
diff --git a/include/media/ipu-bridge.h b/include/media/ipu-bridge.h
new file mode 100644
index 000000000000..bdc654a45521
--- /dev/null
+++ b/include/media/ipu-bridge.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Author: Dan Scally <djrscally@gmail.com> */
+#ifndef __IPU_BRIDGE_H
+#define __IPU_BRIDGE_H
+
+#include <linux/property.h>
+#include <linux/types.h>
+#include <media/v4l2-fwnode.h>
+
+#define IPU_HID "INT343E"
+#define IPU_MAX_LANES 4
+#define IPU_MAX_PORTS 4
+#define MAX_NUM_LINK_FREQS 3
+
+/* Values are educated guesses as we don't have a spec */
+#define IPU_SENSOR_ROTATION_NORMAL 0
+#define IPU_SENSOR_ROTATION_INVERTED 1
+
+#define IPU_SENSOR_CONFIG(_HID, _NR, ...) \
+ (const struct ipu_sensor_config) { \
+ .hid = _HID, \
+ .nr_link_freqs = _NR, \
+ .link_freqs = { __VA_ARGS__ } \
+ }
+
+#define NODE_SENSOR(_HID, _PROPS) \
+ (const struct software_node) { \
+ .name = _HID, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_PORT(_PORT, _SENSOR_NODE) \
+ (const struct software_node) { \
+ .name = _PORT, \
+ .parent = _SENSOR_NODE, \
+ }
+
+#define NODE_ENDPOINT(_EP, _PORT, _PROPS) \
+ (const struct software_node) { \
+ .name = _EP, \
+ .parent = _PORT, \
+ .properties = _PROPS, \
+ }
+
+#define NODE_VCM(_TYPE) \
+ (const struct software_node) { \
+ .name = _TYPE, \
+ }
+
+enum ipu_sensor_swnodes {
+ SWNODE_SENSOR_HID,
+ SWNODE_SENSOR_PORT,
+ SWNODE_SENSOR_ENDPOINT,
+ SWNODE_IPU_PORT,
+ SWNODE_IPU_ENDPOINT,
+ /* below are optional / maybe empty */
+ SWNODE_IVSC_HID,
+ SWNODE_IVSC_SENSOR_PORT,
+ SWNODE_IVSC_SENSOR_ENDPOINT,
+ SWNODE_IVSC_IPU_PORT,
+ SWNODE_IVSC_IPU_ENDPOINT,
+ SWNODE_VCM,
+ SWNODE_COUNT
+};
+
+/* Data representation as it is in ACPI SSDB buffer */
+struct ipu_sensor_ssdb {
+ u8 version;
+ u8 sku;
+ u8 guid_csi2[16];
+ u8 devfunction;
+ u8 bus;
+ u32 dphylinkenfuses;
+ u32 clockdiv;
+ u8 link;
+ u8 lanes;
+ u32 csiparams[10];
+ u32 maxlanespeed;
+ u8 sensorcalibfileidx;
+ u8 sensorcalibfileidxInMBZ[3];
+ u8 romtype;
+ u8 vcmtype;
+ u8 platforminfo;
+ u8 platformsubinfo;
+ u8 flash;
+ u8 privacyled;
+ u8 degree;
+ u8 mipilinkdefined;
+ u32 mclkspeed;
+ u8 controllogicid;
+ u8 reserved1[3];
+ u8 mclkport;
+ u8 reserved2[13];
+} __packed;
+
+struct ipu_property_names {
+ char clock_frequency[16];
+ char rotation[9];
+ char orientation[12];
+ char bus_type[9];
+ char data_lanes[11];
+ char remote_endpoint[16];
+ char link_frequencies[17];
+};
+
+struct ipu_node_names {
+ char port[7];
+ char ivsc_sensor_port[7];
+ char ivsc_ipu_port[7];
+ char endpoint[11];
+ char remote_port[7];
+ char vcm[16];
+};
+
+struct ipu_sensor_config {
+ const char *hid;
+ const u8 nr_link_freqs;
+ const u64 link_freqs[MAX_NUM_LINK_FREQS];
+};
+
+struct ipu_sensor {
+ /* append ssdb.link(u8) in "-%u" format as suffix of HID */
+ char name[ACPI_ID_LEN + 4];
+ struct acpi_device *adev;
+
+ struct device *csi_dev;
+ struct acpi_device *ivsc_adev;
+ char ivsc_name[ACPI_ID_LEN + 4];
+
+ /* SWNODE_COUNT + 1 for terminating NULL */
+ const struct software_node *group[SWNODE_COUNT + 1];
+ struct software_node swnodes[SWNODE_COUNT];
+ struct ipu_node_names node_names;
+
+ u8 link;
+ u8 lanes;
+ u32 mclkspeed;
+ u32 rotation;
+ enum v4l2_fwnode_orientation orientation;
+ const char *vcm_type;
+
+ struct ipu_property_names prop_names;
+ struct property_entry ep_properties[5];
+ struct property_entry dev_properties[5];
+ struct property_entry ipu_properties[3];
+ struct property_entry ivsc_properties[1];
+ struct property_entry ivsc_sensor_ep_properties[4];
+ struct property_entry ivsc_ipu_ep_properties[4];
+
+ struct software_node_ref_args local_ref[1];
+ struct software_node_ref_args remote_ref[1];
+ struct software_node_ref_args vcm_ref[1];
+ struct software_node_ref_args ivsc_sensor_ref[1];
+ struct software_node_ref_args ivsc_ipu_ref[1];
+};
+
+typedef int (*ipu_parse_sensor_fwnode_t)(struct acpi_device *adev,
+ struct ipu_sensor *sensor);
+
+struct ipu_bridge {
+ struct device *dev;
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode;
+ char ipu_node_name[ACPI_ID_LEN];
+ struct software_node ipu_hid_node;
+ u32 data_lanes[4];
+ unsigned int n_sensors;
+ struct ipu_sensor sensors[IPU_MAX_PORTS];
+};
+
+#if IS_ENABLED(CONFIG_IPU_BRIDGE)
+int ipu_bridge_init(struct device *dev,
+ ipu_parse_sensor_fwnode_t parse_sensor_fwnode);
+int ipu_bridge_parse_ssdb(struct acpi_device *adev, struct ipu_sensor *sensor);
+int ipu_bridge_instantiate_vcm(struct device *sensor);
+#else
+/* Use a define to avoid the @parse_sensor_fwnode argument getting evaluated */
+#define ipu_bridge_init(dev, parse_sensor_fwnode) (0)
+static inline int ipu_bridge_instantiate_vcm(struct device *s) { return 0; }
+#endif
+
+#endif
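A hedged sketch of how an IPU host driver is expected to use this header, passing the exported ACPI SSDB parser as the per-sensor callback; the probe function is illustrative.

#include <media/ipu-bridge.h>

static int example_ipu_probe(struct device *dev)
{
	int ret;

	ret = ipu_bridge_init(dev, ipu_bridge_parse_ssdb);
	if (ret)
		return ret;

	/* ... continue with fwnode graph / v4l2-async setup ... */
	return 0;
}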
diff --git a/include/media/ov_16bit_addr_reg_helpers.h b/include/media/ov_16bit_addr_reg_helpers.h
deleted file mode 100644
index 1c60a50bd795..000000000000
--- a/include/media/ov_16bit_addr_reg_helpers.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * I2C register access helpers for Omnivision OVxxxx image sensors which expect
- * a 16 bit register address in big-endian format and which have 1-3 byte
- * wide registers, in big-endian format (for the higher width registers).
- *
- * Based on the register helpers from drivers/media/i2c/ov2680.c which is:
- * Copyright (C) 2018 Linaro Ltd
- */
-#ifndef __OV_16BIT_ADDR_REG_HELPERS_H
-#define __OV_16BIT_ADDR_REG_HELPERS_H
-
-#include <asm/unaligned.h>
-#include <linux/dev_printk.h>
-#include <linux/i2c.h>
-
-static inline int ov_read_reg(struct i2c_client *client, u16 reg,
- unsigned int len, u32 *val)
-{
- u8 addr_buf[2], data_buf[4] = { };
- struct i2c_msg msgs[2];
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, addr_buf);
-
- msgs[0].addr = client->addr;
- msgs[0].flags = 0;
- msgs[0].len = ARRAY_SIZE(addr_buf);
- msgs[0].buf = addr_buf;
-
- msgs[1].addr = client->addr;
- msgs[1].flags = I2C_M_RD;
- msgs[1].len = len;
- msgs[1].buf = &data_buf[4 - len];
-
- ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs));
- if (ret != ARRAY_SIZE(msgs)) {
- dev_err(&client->dev, "read error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- *val = get_unaligned_be32(data_buf);
-
- return 0;
-}
-
-#define ov_read_reg8(s, r, v) ov_read_reg(s, r, 1, v)
-#define ov_read_reg16(s, r, v) ov_read_reg(s, r, 2, v)
-#define ov_read_reg24(s, r, v) ov_read_reg(s, r, 3, v)
-
-static inline int ov_write_reg(struct i2c_client *client, u16 reg,
- unsigned int len, u32 val)
-{
- u8 buf[6];
- int ret;
-
- if (len > 4)
- return -EINVAL;
-
- put_unaligned_be16(reg, buf);
- put_unaligned_be32(val << (8 * (4 - len)), buf + 2);
- ret = i2c_master_send(client, buf, len + 2);
- if (ret != len + 2) {
- dev_err(&client->dev, "write error: reg=0x%4x: %d\n", reg, ret);
- return -EIO;
- }
-
- return 0;
-}
-
-#define ov_write_reg8(s, r, v) ov_write_reg(s, r, 1, v)
-#define ov_write_reg16(s, r, v) ov_write_reg(s, r, 2, v)
-#define ov_write_reg24(s, r, v) ov_write_reg(s, r, 3, v)
-
-static inline int ov_update_reg(struct i2c_client *client, u16 reg, u8 mask, u8 val)
-{
- u32 readval;
- int ret;
-
- ret = ov_read_reg8(client, reg, &readval);
- if (ret < 0)
- return ret;
-
- val = (readval & ~mask) | (val & mask);
-
- return ov_write_reg8(client, reg, val);
-}
-
-#endif
diff --git a/include/media/v4l2-async.h b/include/media/v4l2-async.h
index 25eb1d138c06..9bd326d31181 100644
--- a/include/media/v4l2-async.h
+++ b/include/media/v4l2-async.h
@@ -22,76 +22,85 @@ struct v4l2_async_notifier;
* enum v4l2_async_match_type - type of asynchronous subdevice logic to be used
* in order to identify a match
*
- * @V4L2_ASYNC_MATCH_I2C: Match will check for I2C adapter ID and address
- * @V4L2_ASYNC_MATCH_FWNODE: Match will use firmware node
+ * @V4L2_ASYNC_MATCH_TYPE_I2C: Match will check for I2C adapter ID and address
+ * @V4L2_ASYNC_MATCH_TYPE_FWNODE: Match will use firmware node
*
- * This enum is used by the asynchronous sub-device logic to define the
+ * This enum is used by the asynchronous connection logic to define the
* algorithm that will be used to match an asynchronous device.
*/
enum v4l2_async_match_type {
- V4L2_ASYNC_MATCH_I2C,
- V4L2_ASYNC_MATCH_FWNODE,
+ V4L2_ASYNC_MATCH_TYPE_I2C,
+ V4L2_ASYNC_MATCH_TYPE_FWNODE,
};
/**
- * struct v4l2_async_subdev - sub-device descriptor, as known to a bridge
- *
- * @match_type: type of match that will be used
- * @match: union of per-bus type matching data sets
- * @match.fwnode:
- * pointer to &struct fwnode_handle to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_FWNODE.
- * @match.i2c: embedded struct with I2C parameters to be matched.
+ * struct v4l2_async_match_desc - async connection match information
+ *
+ * @type: type of match that will be used
+ * @fwnode: pointer to &struct fwnode_handle to be matched.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_FWNODE.
+ * @i2c: embedded struct with I2C parameters to be matched.
* Both @match.i2c.adapter_id and @match.i2c.address
* should be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.adapter_id:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.adapter_id:
* I2C adapter ID to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @match.i2c.address:
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
+ * @i2c.address:
* I2C address to be matched.
- * Used if @match_type is %V4L2_ASYNC_MATCH_I2C.
- * @asd_list: used to add struct v4l2_async_subdev objects to the
- * master notifier @asd_list
- * @list: used to link struct v4l2_async_subdev objects, waiting to be
- * probed, to a notifier->waiting list
- *
- * When this struct is used as a member in a driver specific struct,
- * the driver specific struct shall contain the &struct
- * v4l2_async_subdev as its first member.
+ * Used if @match_type is %V4L2_ASYNC_MATCH_TYPE_I2C.
*/
-struct v4l2_async_subdev {
- enum v4l2_async_match_type match_type;
+struct v4l2_async_match_desc {
+ enum v4l2_async_match_type type;
union {
struct fwnode_handle *fwnode;
struct {
int adapter_id;
unsigned short address;
} i2c;
- } match;
+ };
+};
- /* v4l2-async core private: not to be used by drivers */
- struct list_head list;
- struct list_head asd_list;
+/**
+ * struct v4l2_async_connection - sub-device connection descriptor, as known to
+ * a bridge
+ *
+ * @match: struct of match type and per-bus type matching data sets
+ * @notifier: the async notifier the connection is related to
+ * @asc_entry: used to add struct v4l2_async_connection objects to the
+ * notifier @waiting_list or @done_list
+ * @asc_subdev_entry: entry in struct v4l2_async_subdev.asc_list list
+ * @sd: the related sub-device
+ *
+ * When this struct is used as a member in a driver specific struct, the driver
+ * specific struct shall contain the &struct v4l2_async_connection as its first
+ * member.
+ */
+struct v4l2_async_connection {
+ struct v4l2_async_match_desc match;
+ struct v4l2_async_notifier *notifier;
+ struct list_head asc_entry;
+ struct list_head asc_subdev_entry;
+ struct v4l2_subdev *sd;
};
/**
* struct v4l2_async_notifier_operations - Asynchronous V4L2 notifier operations
- * @bound: a subdevice driver has successfully probed one of the subdevices
- * @complete: All subdevices have been probed successfully. The complete
+ * @bound: a sub-device has been bound by the given connection
+ * @complete: All connections have been bound successfully. The complete
* callback is only executed for the root notifier.
* @unbind: a subdevice is leaving
- * @destroy: the asd is about to be freed
+ * @destroy: the asc is about to be freed
*/
struct v4l2_async_notifier_operations {
int (*bound)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
int (*complete)(struct v4l2_async_notifier *notifier);
void (*unbind)(struct v4l2_async_notifier *notifier,
struct v4l2_subdev *subdev,
- struct v4l2_async_subdev *asd);
- void (*destroy)(struct v4l2_async_subdev *asd);
+ struct v4l2_async_connection *asc);
+ void (*destroy)(struct v4l2_async_connection *asc);
};
/**
@@ -101,20 +110,31 @@ struct v4l2_async_notifier_operations {
* @v4l2_dev: v4l2_device of the root notifier, NULL otherwise
* @sd: sub-device that registered the notifier, NULL otherwise
* @parent: parent notifier
- * @asd_list: master list of struct v4l2_async_subdev
- * @waiting: list of struct v4l2_async_subdev, waiting for their drivers
- * @done: list of struct v4l2_subdev, already probed
- * @list: member in a global list of notifiers
+ * @waiting_list: list of struct v4l2_async_connection, waiting for their
+ * drivers
+ * @done_list: list of struct v4l2_subdev, already probed
+ * @notifier_entry: member in a global list of notifiers
*/
struct v4l2_async_notifier {
const struct v4l2_async_notifier_operations *ops;
struct v4l2_device *v4l2_dev;
struct v4l2_subdev *sd;
struct v4l2_async_notifier *parent;
- struct list_head asd_list;
- struct list_head waiting;
- struct list_head done;
- struct list_head list;
+ struct list_head waiting_list;
+ struct list_head done_list;
+ struct list_head notifier_entry;
+};
+
+/**
+ * struct v4l2_async_subdev_endpoint - Entry in sub-device's fwnode list
+ *
+ * @async_subdev_endpoint_entry: An entry in async_subdev_endpoint_list of
+ * &struct v4l2_subdev
+ * @endpoint: Endpoint fwnode against which to match the sub-device
+ */
+struct v4l2_async_subdev_endpoint {
+ struct list_head async_subdev_endpoint_entry;
+ struct fwnode_handle *endpoint;
};
/**
@@ -128,76 +148,71 @@ void v4l2_async_debug_init(struct dentry *debugfs_dir);
* v4l2_async_nf_init - Initialize a notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
+ * @v4l2_dev: pointer to &struct v4l2_device
*
- * This function initializes the notifier @asd_list. It must be called
+ * This function initializes the notifier @asc_entry. It must be called
* before adding a subdevice to a notifier, using one of:
* v4l2_async_nf_add_fwnode_remote(),
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_i2c(),
- * __v4l2_async_nf_add_subdev() or
- * v4l2_async_nf_parse_fwnode_endpoints().
+ * v4l2_async_nf_add_fwnode() or
+ * v4l2_async_nf_add_i2c().
*/
-void v4l2_async_nf_init(struct v4l2_async_notifier *notifier);
+void v4l2_async_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_device *v4l2_dev);
/**
- * __v4l2_async_nf_add_subdev - Add an async subdev to the
- * notifier's master asd list.
+ * v4l2_async_subdev_nf_init - Initialize a sub-device notifier.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @asd: pointer to &struct v4l2_async_subdev
+ * @sd: pointer to &struct v4l2_subdev
*
- * \warning: Drivers should avoid using this function and instead use one of:
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_fwnode_remote() or
+ * This function initializes the notifier @asc_list. It must be called
+ * before adding a subdevice to a notifier, using one of:
+ * v4l2_async_nf_add_fwnode_remote(), v4l2_async_nf_add_fwnode() or
* v4l2_async_nf_add_i2c().
- *
- * Call this function before registering a notifier to link the provided @asd to
- * the notifiers master @asd_list. The @asd must be allocated with k*alloc() as
- * it will be freed by the framework when the notifier is destroyed.
*/
-int __v4l2_async_nf_add_subdev(struct v4l2_async_notifier *notifier,
- struct v4l2_async_subdev *asd);
+void v4l2_async_subdev_nf_init(struct v4l2_async_notifier *notifier,
+ struct v4l2_subdev *sd);
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode(struct v4l2_async_notifier *notifier,
struct fwnode_handle *fwnode,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_fwnode - Allocate and add a fwnode async
- * subdev to the notifier's master asd_list.
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @fwnode: fwnode handle of the sub-device to be matched, pointer to
* &struct fwnode_handle
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @type: Type of the driver's async sub-device or connection struct. The
+ * &struct v4l2_async_connection shall be the first member of the
+ * driver's async struct, i.e. both begin at the same memory address.
*
- * Allocate a fwnode-matched asd of size asd_struct_size, and add it to the
- * notifiers @asd_list. The function also gets a reference of the fwnode which
+ * Allocate a fwnode-matched asc of size asc_struct_size, and add it to the
+ * notifiers @asc_list. The function also gets a reference of the fwnode which
* is released later at notifier cleanup time.
*/
#define v4l2_async_nf_add_fwnode(notifier, fwnode, type) \
((type *)__v4l2_async_nf_add_fwnode(notifier, fwnode, sizeof(type)))
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
struct fwnode_handle *endpoint,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_fwnode_remote - Allocate and add a fwnode
* remote async subdev to the
- * notifier's master asd_list.
+ * notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
- * @ep: local endpoint pointing to the remote sub-device to be matched,
+ * @ep: local endpoint pointing to the remote connection to be matched,
* pointer to &struct fwnode_handle
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
* Gets the remote endpoint of a given local endpoint, set it up for fwnode
- * matching and adds the async sub-device to the notifier's @asd_list. The
+ * matching and adds the async connection to the notifier's @asc_list. The
* function also gets a reference of the fwnode which is released later at
* notifier cleanup time.
*
@@ -207,46 +222,63 @@ __v4l2_async_nf_add_fwnode_remote(struct v4l2_async_notifier *notif,
#define v4l2_async_nf_add_fwnode_remote(notifier, ep, type) \
((type *)__v4l2_async_nf_add_fwnode_remote(notifier, ep, sizeof(type)))
-struct v4l2_async_subdev *
+struct v4l2_async_connection *
__v4l2_async_nf_add_i2c(struct v4l2_async_notifier *notifier,
int adapter_id, unsigned short address,
- unsigned int asd_struct_size);
+ unsigned int asc_struct_size);
/**
* v4l2_async_nf_add_i2c - Allocate and add an i2c async
- * subdev to the notifier's master asd_list.
+ * subdev to the notifier's master asc_list.
*
* @notifier: pointer to &struct v4l2_async_notifier
* @adapter: I2C adapter ID to be matched
- * @address: I2C address of sub-device to be matched
- * @type: Type of the driver's async sub-device struct. The &struct
- * v4l2_async_subdev shall be the first member of the driver's async
- * sub-device struct, i.e. both begin at the same memory address.
+ * @address: I2C address of connection to be matched
+ * @type: Type of the driver's async connection struct. The &struct
+ * v4l2_async_connection shall be the first member of the driver's async
+ * connection struct, i.e. both begin at the same memory address.
*
* Same as v4l2_async_nf_add_fwnode() but for I2C matched
- * sub-devices.
+ * connections.
*/
#define v4l2_async_nf_add_i2c(notifier, adapter, address, type) \
((type *)__v4l2_async_nf_add_i2c(notifier, adapter, address, \
sizeof(type)))
/**
- * v4l2_async_nf_register - registers a subdevice asynchronous notifier
+ * v4l2_async_subdev_endpoint_add - Add an endpoint fwnode to async sub-device
+ * matching list
*
- * @v4l2_dev: pointer to &struct v4l2_device
- * @notifier: pointer to &struct v4l2_async_notifier
+ * @sd: the sub-device
+ * @fwnode: the endpoint fwnode to match
+ *
+ * Add a fwnode to the async sub-device's matching list. This allows registering
+ * multiple async sub-devices from a single device.
+ *
+ * Note that calling v4l2_subdev_cleanup() as part of the sub-device's cleanup
+ * if endpoints have been added to the sub-device's fwnode matching list.
+ *
+ * Returns an error on failure, 0 on success.
*/
-int v4l2_async_nf_register(struct v4l2_device *v4l2_dev,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_subdev_endpoint_add(struct v4l2_subdev *sd,
+ struct fwnode_handle *fwnode);
/**
- * v4l2_async_subdev_nf_register - registers a subdevice asynchronous
- * notifier for a sub-device
+ * v4l2_async_connection_unique - return a unique &struct v4l2_async_connection
+ * for a sub-device
+ * @sd: the sub-device
+ *
+ * Return an async connection for a sub-device, when there is a single
+ * one only.
+ */
+struct v4l2_async_connection *
+v4l2_async_connection_unique(struct v4l2_subdev *sd);
+
+/**
+ * v4l2_async_nf_register - registers a subdevice asynchronous notifier
*
- * @sd: pointer to &struct v4l2_subdev
* @notifier: pointer to &struct v4l2_async_notifier
*/
-int v4l2_async_subdev_nf_register(struct v4l2_subdev *sd,
- struct v4l2_async_notifier *notifier);
+int v4l2_async_nf_register(struct v4l2_async_notifier *notifier);
/**
* v4l2_async_nf_unregister - unregisters a subdevice
@@ -261,14 +293,10 @@ void v4l2_async_nf_unregister(struct v4l2_async_notifier *notifier);
* @notifier: the notifier the resources of which are to be cleaned up
*
* Release memory resources related to a notifier, including the async
- * sub-devices allocated for the purposes of the notifier but not the notifier
+ * connections allocated for the purposes of the notifier but not the notifier
* itself. The user is responsible for calling this function to clean up the
- * notifier after calling
- * v4l2_async_nf_add_fwnode_remote(),
- * v4l2_async_nf_add_fwnode(),
- * v4l2_async_nf_add_i2c(),
- * __v4l2_async_nf_add_subdev() or
- * v4l2_async_nf_parse_fwnode_endpoints().
+ * notifier after calling v4l2_async_nf_add_fwnode_remote(),
+ * v4l2_async_nf_add_fwnode() or v4l2_async_nf_add_i2c().
*
* There is no harm from calling v4l2_async_nf_cleanup() in other
* cases as long as its memory has been zeroed after it has been
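A hedged sketch of a bridge driver on the reworked API: the connection struct embeds struct v4l2_async_connection as its first member, the notifier is initialised against the bridge's v4l2_device, and registration no longer takes a v4l2_device argument. All example_* names are illustrative; a real driver would also set notifier->ops before registering.

#include <linux/err.h>
#include <media/v4l2-async.h>
#include <media/v4l2-device.h>

struct example_async_conn {
	struct v4l2_async_connection asc;	/* must be first */
	unsigned int port;
};

static int example_parse_remote(struct v4l2_device *v4l2_dev,
				struct v4l2_async_notifier *notifier,
				struct fwnode_handle *endpoint)
{
	struct example_async_conn *conn;

	v4l2_async_nf_init(notifier, v4l2_dev);

	conn = v4l2_async_nf_add_fwnode_remote(notifier, endpoint,
					       struct example_async_conn);
	if (IS_ERR(conn))
		return PTR_ERR(conn);

	conn->port = 0;

	return v4l2_async_nf_register(notifier);
}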
diff --git a/include/media/v4l2-cci.h b/include/media/v4l2-cci.h
new file mode 100644
index 000000000000..0f6803e4b17e
--- /dev/null
+++ b/include/media/v4l2-cci.h
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * MIPI Camera Control Interface (CCI) register access helpers.
+ *
+ * Copyright (C) 2023 Hans de Goede <hansg@kernel.org>
+ */
+#ifndef _V4L2_CCI_H
+#define _V4L2_CCI_H
+
+#include <linux/types.h>
+
+struct i2c_client;
+struct regmap;
+
+/**
+ * struct cci_reg_sequence - An individual write from a sequence of CCI writes
+ *
+ * @reg: Register address, use CCI_REG#() macros to encode reg width
+ * @val: Register value
+ *
+ * Register/value pairs for sequences of writes.
+ */
+struct cci_reg_sequence {
+ u32 reg;
+ u64 val;
+};
+
+/*
+ * Macros to define register address with the register width encoded
+ * into the higher bits.
+ */
+#define CCI_REG_ADDR_MASK GENMASK(15, 0)
+#define CCI_REG_WIDTH_SHIFT 16
+#define CCI_REG_WIDTH_MASK GENMASK(19, 16)
+
+#define CCI_REG8(x) ((1 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG16(x) ((2 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG24(x) ((3 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG32(x) ((4 << CCI_REG_WIDTH_SHIFT) | (x))
+#define CCI_REG64(x) ((8 << CCI_REG_WIDTH_SHIFT) | (x))
+
+/**
+ * cci_read() - Read a value from a single CCI register
+ *
+ * @map: Register map to read from
+ * @reg: Register address to read, use CCI_REG#() macros to encode reg width
+ * @val: Pointer to store read value
+ * @err: Optional pointer to store errors; if a previous error is set
+ * then the read will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_read(struct regmap *map, u32 reg, u64 *val, int *err);
+
+/**
+ * cci_write() - Write a value to a single CCI register
+ *
+ * @map: Register map to write to
+ * @reg: Register address to write, use CCI_REG#() macros to encode reg width
+ * @val: Value to be written
+ * @err: Optional pointer to store errors; if a previous error is set
+ * then the write will be skipped
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_write(struct regmap *map, u32 reg, u64 val, int *err);
+
+/**
+ * cci_update_bits() - Perform a read/modify/write cycle on
+ * a single CCI register
+ *
+ * @map: Register map to update
+ * @reg: Register address to update, use CCI_REG#() macros to encode reg width
+ * @mask: Bitmask to change
+ * @val: New value for bitmask
+ * @err: Optional pointer to store errors; if a previous error is set
+ * then the update will be skipped
+ *
+ * Note this uses read-modify-write to update the bits, atomicity with regards
+ * to other cci_*() register access functions is NOT guaranteed.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_update_bits(struct regmap *map, u32 reg, u64 mask, u64 val, int *err);
+
+/**
+ * cci_multi_reg_write() - Write multiple registers to the device
+ *
+ * @map: Register map to write to
+ * @regs: Array of structures containing register-address, -value pairs to be
+ * written, register-addresses use CCI_REG#() macros to encode reg width
+ * @num_regs: Number of registers to write
+ * @err: Optional pointer to store errors; if a previous error is set
+ * then the write will be skipped
+ *
+ * Write multiple registers to the device where the set of register, value
+ * pairs are supplied in any order, possibly not all in a single range.
+ *
+ * Use of the CCI_REG#() macros to encode reg width is mandatory.
+ *
+ * For raw lists of register-address, -value pairs with only 8 bit
+ * wide writes regmap_multi_reg_write() can be used instead.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+int cci_multi_reg_write(struct regmap *map, const struct cci_reg_sequence *regs,
+ unsigned int num_regs, int *err);
+
+#if IS_ENABLED(CONFIG_V4L2_CCI_I2C)
+/**
+ * devm_cci_regmap_init_i2c() - Create regmap to use with cci_*() register
+ * access functions
+ *
+ * @client: i2c_client to create the regmap for
+ * @reg_addr_bits: register address width to use (8 or 16)
+ *
+ * Note the memory for the created regmap is devm() managed, tied to the client.
+ *
+ * Return: %0 on success or a negative error code on failure.
+ */
+struct regmap *devm_cci_regmap_init_i2c(struct i2c_client *client,
+ int reg_addr_bits);
+#endif
+
+#endif
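A hedged usage sketch, assuming CONFIG_V4L2_CCI_I2C is enabled: build a regmap with 16-bit register addresses, then chain accesses through the shared err pointer so only the first failure is reported. The register addresses are made up for illustration.

#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <media/v4l2-cci.h>

#define EXAMPLE_REG_CHIP_ID	CCI_REG16(0x0000)
#define EXAMPLE_REG_MODE	CCI_REG8(0x0100)

static int example_sensor_init(struct i2c_client *client)
{
	struct regmap *map;
	u64 chip_id;
	int ret = 0;

	map = devm_cci_regmap_init_i2c(client, 16);
	if (IS_ERR(map))
		return PTR_ERR(map);

	cci_read(map, EXAMPLE_REG_CHIP_ID, &chip_id, &ret);
	cci_write(map, EXAMPLE_REG_MODE, 0x01, &ret);

	return ret;	/* first error encountered, or 0 */
}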
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index 394d798f3dfa..f7c57c776589 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -21,10 +21,6 @@
#include <media/v4l2-mediabus.h>
-struct fwnode_handle;
-struct v4l2_async_notifier;
-struct v4l2_async_subdev;
-
/**
* struct v4l2_fwnode_endpoint - the endpoint data structure
* @base: fwnode endpoint of the v4l2_fwnode
@@ -393,72 +389,6 @@ int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
int v4l2_fwnode_device_parse(struct device *dev,
struct v4l2_fwnode_device_properties *props);
-/**
- * typedef parse_endpoint_func - Driver's callback function to be called on
- * each V4L2 fwnode endpoint.
- *
- * @dev: pointer to &struct device
- * @vep: pointer to &struct v4l2_fwnode_endpoint
- * @asd: pointer to &struct v4l2_async_subdev
- *
- * Return:
- * * %0 on success
- * * %-ENOTCONN if the endpoint is to be skipped but this
- * should not be considered as an error
- * * %-EINVAL if the endpoint configuration is invalid
- */
-typedef int (*parse_endpoint_func)(struct device *dev,
- struct v4l2_fwnode_endpoint *vep,
- struct v4l2_async_subdev *asd);
-
-/**
- * v4l2_async_nf_parse_fwnode_endpoints - Parse V4L2 fwnode endpoints in a
- * device node
- * @dev: the device the endpoints of which are to be parsed
- * @notifier: notifier for @dev
- * @asd_struct_size: size of the driver's async sub-device struct, including
- * sizeof(struct v4l2_async_subdev). The &struct
- * v4l2_async_subdev shall be the first member of
- * the driver's async sub-device struct, i.e. both
- * begin at the same memory address.
- * @parse_endpoint: Driver's callback function called on each V4L2 fwnode
- * endpoint. Optional.
- *
- * DEPRECATED! This function is deprecated. Don't use it in new drivers.
- * Instead see an example in cio2_parse_firmware() function in
- * drivers/media/pci/intel/ipu3/ipu3-cio2.c .
- *
- * Parse the fwnode endpoints of the @dev device and populate the async sub-
- * devices list in the notifier. The @parse_endpoint callback function is
- * called for each endpoint with the corresponding async sub-device pointer to
- * let the caller initialize the driver-specific part of the async sub-device
- * structure.
- *
- * The notifier memory shall be zeroed before this function is called on the
- * notifier.
- *
- * This function may not be called on a registered notifier and may be called on
- * a notifier only once.
- *
- * The &struct v4l2_fwnode_endpoint passed to the callback function
- * @parse_endpoint is released once the function is finished. If there is a need
- * to retain that configuration, the user needs to allocate memory for it.
- *
- * Any notifier populated using this function must be released with a call to
- * v4l2_async_nf_cleanup() after it has been unregistered and the async
- * sub-devices are no longer in use, even if the function returned an error.
- *
- * Return: %0 on success, including when no async sub-devices are found
- * %-ENOMEM if memory allocation failed
- * %-EINVAL if graph or endpoint parsing failed
- * Other error codes as returned by @parse_endpoint
- */
-int
-v4l2_async_nf_parse_fwnode_endpoints(struct device *dev,
- struct v4l2_async_notifier *notifier,
- size_t asd_struct_size,
- parse_endpoint_func parse_endpoint);
-
/* Helper macros to access the connector links. */
/** v4l2_connector_last_link - Helper macro to get the first
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index b325df0d54d6..d9fca929c10b 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1020,12 +1020,14 @@ struct v4l2_subdev_platform_data {
* @dev: pointer to the physical device, if any
* @fwnode: The fwnode_handle of the subdev, usually the same as
* either dev->of_node->fwnode or dev->fwnode (whichever is non-NULL).
- * @async_list: Links this subdev to a global subdev_list or @notifier->done
- * list.
- * @asd: Pointer to respective &struct v4l2_async_subdev.
- * @notifier: Pointer to the managing notifier.
+ * @async_list: Links this subdev to a global subdev_list or
+ * @notifier->done_list.
+ * @async_subdev_endpoint_list: List entry in async_subdev_endpoint_entry of
+ * &struct v4l2_async_subdev_endpoint.
* @subdev_notifier: A sub-device notifier implicitly registered for the sub-
* device using v4l2_async_register_subdev_sensor().
+ * @asc_list: Async connection list, of &struct
+ * v4l2_async_connection.subdev_entry.
* @pdata: common part of subdevice platform data
* @state_lock: A pointer to a lock used for all the subdev's states, set by the
* driver. This is optional. If NULL, each state instance will get
@@ -1065,9 +1067,9 @@ struct v4l2_subdev {
struct device *dev;
struct fwnode_handle *fwnode;
struct list_head async_list;
- struct v4l2_async_subdev *asd;
- struct v4l2_async_notifier *notifier;
+ struct list_head async_subdev_endpoint_list;
struct v4l2_async_notifier *subdev_notifier;
+ struct list_head asc_list;
struct v4l2_subdev_platform_data *pdata;
struct mutex *state_lock;
@@ -1383,8 +1385,9 @@ int __v4l2_subdev_init_finalize(struct v4l2_subdev *sd, const char *name,
* v4l2_subdev_cleanup() - Releases the resources allocated by the subdevice
* @sd: The subdevice
*
- * This function will release the resources allocated in
- * v4l2_subdev_init_finalize.
+ * Clean up a V4L2 async sub-device. Must be called for a sub-device as part of
+ * its release if resources have been associated with it using
+ * v4l2_async_subdev_endpoint_add() or v4l2_subdev_init_finalize().
*/
void v4l2_subdev_cleanup(struct v4l2_subdev *sd);
@@ -1532,7 +1535,7 @@ __v4l2_subdev_next_active_route(const struct v4l2_subdev_krouting *routing,
*/
int v4l2_subdev_set_routing_with_fmt(struct v4l2_subdev *sd,
struct v4l2_subdev_state *state,
- struct v4l2_subdev_krouting *routing,
+ const struct v4l2_subdev_krouting *routing,
const struct v4l2_mbus_framefmt *fmt);
/**
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index e6359f7346f1..c33348ba1657 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -350,7 +350,7 @@ struct hci_dev {
struct list_head list;
struct mutex lock;
- char name[8];
+ const char *name;
unsigned long flags;
__u16 id;
__u8 bus;
diff --git a/include/net/ip.h b/include/net/ip.h
index 19adacd5ece0..3489a1cca5e7 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -57,6 +57,7 @@ struct inet_skb_parm {
#define IPSKB_FRAG_PMTU BIT(6)
#define IPSKB_L3SLAVE BIT(7)
#define IPSKB_NOPOLICY BIT(8)
+#define IPSKB_MULTIPATH BIT(9)
u16 frag_max_size;
};
@@ -94,7 +95,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
ipcm_init(ipcm);
ipcm->sockc.mark = READ_ONCE(inet->sk.sk_mark);
- ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->sockc.tsflags = READ_ONCE(inet->sk.sk_tsflags);
ipcm->oif = READ_ONCE(inet->sk.sk_bound_dev_if);
ipcm->addr = inet->inet_saddr;
ipcm->protocol = inet->inet_num;
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index c9ff23cf313e..1ba9f4ddf2f6 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -642,7 +642,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
if (!net->ipv6.fib6_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl6->fl6_sport = flkeys->ports.src;
fl6->fl6_dport = flkeys->ports.dst;
fl6->flowi6_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index a378eff827c7..15de07d36540 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -154,6 +154,7 @@ struct fib_info {
int fib_nhs;
bool fib_nh_is_v6;
bool nh_updated;
+ bool pfsrc_removed;
struct nexthop *nh;
struct rcu_head rcu;
struct fib_nh fib_nh[];
@@ -418,7 +419,10 @@ static inline bool fib4_rules_early_flow_dissect(struct net *net,
if (!net->ipv4.fib_rules_require_fldissect)
return false;
- skb_flow_dissect_flow_keys(skb, flkeys, flag);
+ memset(flkeys, 0, sizeof(*flkeys));
+ __skb_flow_dissect(net, skb, &flow_keys_dissector,
+ flkeys, NULL, 0, 0, 0, flag);
+
fl4->fl4_sport = flkeys->ports.src;
fl4->fl4_dport = flkeys->ports.dst;
fl4->flowi4_proto = flkeys->basic.ip_proto;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index e8750b4ef7e1..f346b4efbc30 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -483,15 +483,14 @@ static inline void iptunnel_xmit_stats(struct net_device *dev, int pkt_len)
u64_stats_inc(&tstats->tx_packets);
u64_stats_update_end(&tstats->syncp);
put_cpu_ptr(tstats);
+ return;
+ }
+
+ if (pkt_len < 0) {
+ DEV_STATS_INC(dev, tx_errors);
+ DEV_STATS_INC(dev, tx_aborted_errors);
} else {
- struct net_device_stats *err_stats = &dev->stats;
-
- if (pkt_len < 0) {
- err_stats->tx_errors++;
- err_stats->tx_aborted_errors++;
- } else {
- err_stats->tx_dropped++;
- }
+ DEV_STATS_INC(dev, tx_dropped);
}
}
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index d40d8238d4c2..c6932d1a3fa8 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -784,6 +784,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline bool ipv6_addr_v4mapped_any(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_zeronet(a->s6_addr32[3]);
+}
+
static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
@@ -1272,7 +1277,9 @@ static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; }
#ifdef CONFIG_SYSCTL
struct ctl_table *ipv6_icmp_sysctl_init(struct net *net);
+size_t ipv6_icmp_sysctl_table_size(void);
struct ctl_table *ipv6_route_sysctl_init(struct net *net);
+size_t ipv6_route_sysctl_table_size(struct net *net);
int ipv6_sysctl_register(void);
void ipv6_sysctl_unregister(void);
#endif
@@ -1358,7 +1365,7 @@ static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val)
return 0;
}
-static inline int ip6_sock_set_addr_preferences(struct sock *sk, bool val)
+static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val)
{
int ret;
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 6da68886fabb..07022bb0d44d 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -539,7 +539,7 @@ static inline int neigh_output(struct neighbour *n, struct sk_buff *skb,
READ_ONCE(hh->hh_len))
return neigh_hh_output(hh, skb);
- return n->output(n, skb);
+ return READ_ONCE(n->output)(n, skb);
}
static inline struct neighbour *
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 9f6add96de2d..eb6cd43b1746 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -471,15 +471,17 @@ void unregister_pernet_device(struct pernet_operations *);
struct ctl_table;
+#define register_net_sysctl(net, path, table) \
+ register_net_sysctl_sz(net, path, table, ARRAY_SIZE(table))
#ifdef CONFIG_SYSCTL
int net_sysctl_init(void);
-struct ctl_table_header *register_net_sysctl(struct net *net, const char *path,
- struct ctl_table *table);
+struct ctl_table_header *register_net_sysctl_sz(struct net *net, const char *path,
+ struct ctl_table *table, size_t table_size);
void unregister_net_sysctl_table(struct ctl_table_header *header);
#else
static inline int net_sysctl_init(void) { return 0; }
-static inline struct ctl_table_header *register_net_sysctl(struct net *net,
- const char *path, struct ctl_table *table)
+static inline struct ctl_table_header *register_net_sysctl_sz(struct net *net,
+ const char *path, struct ctl_table *table, size_t table_size)
{
return NULL;
}
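
To make the size-aware registration concrete, a minimal per-net sketch going
through the wrapper macro is shown below. The sysctl path, the knob name and
the backing variable are hypothetical and only serve to illustrate the call.

	static int my_value;	/* hypothetical knob storage */

	static struct ctl_table my_table[] = {
		{
			.procname	= "my_value",
			.data		= &my_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};

	static int __net_init my_sysctl_init(struct net *net)
	{
		struct ctl_table_header *hdr;

		/* Expands to register_net_sysctl_sz(net, "net/my_subsys", my_table,
		 * ARRAY_SIZE(my_table)), so the core receives the table size
		 * explicitly rather than deducing it. */
		hdr = register_net_sysctl(net, "net/my_subsys", my_table);
		if (!hdr)
			return -ENOMEM;

		return 0;
	}
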
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index dd40c75011d2..7c816359d5a9 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -1682,7 +1682,7 @@ struct nft_trans_gc {
struct net *net;
struct nft_set *set;
u32 seq;
- u8 count;
+ u16 count;
void *priv[NFT_TRANS_GC_BATCHCOUNT];
struct rcu_head rcu;
};
@@ -1700,8 +1700,9 @@ void nft_trans_gc_queue_sync_done(struct nft_trans_gc *trans);
void nft_trans_gc_elem_add(struct nft_trans_gc *gc, void *priv);
-struct nft_trans_gc *nft_trans_gc_catchall(struct nft_trans_gc *gc,
- unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_async(struct nft_trans_gc *gc,
+ unsigned int gc_seq);
+struct nft_trans_gc *nft_trans_gc_catchall_sync(struct nft_trans_gc *gc);
void nft_setelem_data_deactivate(const struct net *net,
const struct nft_set *set,
diff --git a/include/net/scm.h b/include/net/scm.h
index c5bcdf65f55c..e8c76b4be2fe 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -9,6 +9,7 @@
#include <linux/pid.h>
#include <linux/nsproxy.h>
#include <linux/sched/signal.h>
+#include <net/compat.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
@@ -123,14 +124,17 @@ static inline bool scm_has_secdata(struct socket *sock)
static __inline__ void scm_pidfd_recv(struct msghdr *msg, struct scm_cookie *scm)
{
struct file *pidfd_file = NULL;
- int pidfd;
+ int len, pidfd;
- /*
- * put_cmsg() doesn't return an error if CMSG is truncated,
+ /* put_cmsg() doesn't return an error if CMSG is truncated,
* that's why we need to opencode these checks here.
*/
- if ((msg->msg_controllen <= sizeof(struct cmsghdr)) ||
- (msg->msg_controllen - sizeof(struct cmsghdr)) < sizeof(int)) {
+ if (msg->msg_flags & MSG_CMSG_COMPAT)
+ len = sizeof(struct compat_cmsghdr) + sizeof(int);
+ else
+ len = sizeof(struct cmsghdr) + sizeof(int);
+
+ if (msg->msg_controllen < len) {
msg->msg_flags |= MSG_CTRUNC;
return;
}
diff --git a/include/net/sock.h b/include/net/sock.h
index 11d503417591..b770261fbdaf 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1053,6 +1053,12 @@ static inline void sk_wmem_queued_add(struct sock *sk, int val)
WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val);
}
+static inline void sk_forward_alloc_add(struct sock *sk, int val)
+{
+ /* Paired with lockless reads of sk->sk_forward_alloc */
+ WRITE_ONCE(sk->sk_forward_alloc, sk->sk_forward_alloc + val);
+}
+
void sk_stream_write_space(struct sock *sk);
/* OOB backlog add */
@@ -1377,7 +1383,7 @@ static inline int sk_forward_alloc_get(const struct sock *sk)
if (sk->sk_prot->forward_alloc_get)
return sk->sk_prot->forward_alloc_get(sk);
#endif
- return sk->sk_forward_alloc;
+ return READ_ONCE(sk->sk_forward_alloc);
}
static inline bool __sk_stream_memory_free(const struct sock *sk, int wake)
@@ -1673,14 +1679,14 @@ static inline void sk_mem_charge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc -= size;
+ sk_forward_alloc_add(sk, -size);
}
static inline void sk_mem_uncharge(struct sock *sk, int size)
{
if (!sk_has_account(sk))
return;
- sk->sk_forward_alloc += size;
+ sk_forward_alloc_add(sk, size);
sk_mem_reclaim(sk);
}
@@ -1900,7 +1906,9 @@ struct sockcm_cookie {
static inline void sockcm_init(struct sockcm_cookie *sockc,
const struct sock *sk)
{
- *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+ *sockc = (struct sockcm_cookie) {
+ .tsflags = READ_ONCE(sk->sk_tsflags)
+ };
}
int __sock_cmsg_send(struct sock *sk, struct cmsghdr *cmsg,
@@ -2695,9 +2703,9 @@ void __sock_recv_wifi_status(struct msghdr *msg, struct sock *sk,
static inline void
sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
{
- ktime_t kt = skb->tstamp;
struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
-
+ u32 tsflags = READ_ONCE(sk->sk_tsflags);
+ ktime_t kt = skb->tstamp;
/*
* generate control messages if
* - receive time stamping in software requested
@@ -2705,10 +2713,10 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
* - hardware time stamps available and wanted
*/
if (sock_flag(sk, SOCK_RCVTSTAMP) ||
- (sk->sk_tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
- (kt && sk->sk_tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
+ (tsflags & SOF_TIMESTAMPING_RX_SOFTWARE) ||
+ (kt && tsflags & SOF_TIMESTAMPING_SOFTWARE) ||
(hwtstamps->hwtstamp &&
- (sk->sk_tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
+ (tsflags & SOF_TIMESTAMPING_RAW_HARDWARE)))
__sock_recv_timestamp(msg, sk, skb);
else
sock_write_timestamp(sk, kt);
@@ -2730,7 +2738,8 @@ static inline void sock_recv_cmsgs(struct msghdr *msg, struct sock *sk,
#define TSFLAGS_ANY (SOF_TIMESTAMPING_SOFTWARE | \
SOF_TIMESTAMPING_RAW_HARDWARE)
- if (sk->sk_flags & FLAGS_RECV_CMSGS || sk->sk_tsflags & TSFLAGS_ANY)
+ if (sk->sk_flags & FLAGS_RECV_CMSGS ||
+ READ_ONCE(sk->sk_tsflags) & TSFLAGS_ANY)
__sock_recv_cmsgs(msg, sk, skb);
else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sock_write_timestamp(sk, skb->tstamp);
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 07b21d9a9620..91688d0dadcd 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -45,7 +45,6 @@
#include <linux/memcontrol.h>
#include <linux/bpf-cgroup.h>
#include <linux/siphash.h>
-#include <linux/net_mm.h>
extern struct inet_hashinfo tcp_hashinfo;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 1e7774ac808f..533ab92684d8 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -4440,8 +4440,6 @@ struct net_device *ib_get_net_dev_by_params(struct ib_device *dev, u32 port,
const struct sockaddr *addr);
int ib_device_set_netdev(struct ib_device *ib_dev, struct net_device *ndev,
unsigned int port);
-struct net_device *ib_device_netdev(struct ib_device *dev, u32 port);
-
struct ib_wq *ib_create_wq(struct ib_pd *pd,
struct ib_wq_init_attr *init_attr);
int ib_destroy_wq_user(struct ib_wq *wq, struct ib_udata *udata);
diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
index 03abd30e6c8c..2b22f153ef63 100644
--- a/include/rdma/iw_cm.h
+++ b/include/rdma/iw_cm.h
@@ -115,27 +115,6 @@ struct iw_cm_id *iw_create_cm_id(struct ib_device *device,
void iw_destroy_cm_id(struct iw_cm_id *cm_id);
/**
- * iw_cm_bind_qp - Unbind the specified IW CM identifier and QP
- *
- * @cm_id: The IW CM idenfier to unbind from the QP.
- * @qp: The QP
- *
- * This is called by the provider when destroying the QP to ensure
- * that any references held by the IWCM are released. It may also
- * be called by the IWCM when destroying a CM_ID to that any
- * references held by the provider are released.
- */
-void iw_cm_unbind_qp(struct iw_cm_id *cm_id, struct ib_qp *qp);
-
-/**
- * iw_cm_get_qp - Return the ib_qp associated with a QPN
- *
- * @ib_device: The IB device
- * @qpn: The queue pair number
- */
-struct ib_qp *iw_cm_get_qp(struct ib_device *device, int qpn);
-
-/**
* iw_cm_listen - Listen for incoming connection requests on the
* specified IW CM id.
*
diff --git a/include/rv/da_monitor.h b/include/rv/da_monitor.h
index 9eb75683e012..9705b2a98e49 100644
--- a/include/rv/da_monitor.h
+++ b/include/rv/da_monitor.h
@@ -262,7 +262,7 @@ static inline void da_monitor_destroy_##name(void) \
/* \
* per-cpu monitor variables \
*/ \
-DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
+static DEFINE_PER_CPU(struct da_monitor, da_mon_##name); \
\
/* \
* da_get_monitor_##name - return current CPU monitor address \
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 159823e0afbf..8a43534eea5c 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -23,22 +23,12 @@
struct block_device;
-enum sas_class {
- SAS,
- EXPANDER
-};
-
enum sas_phy_role {
PHY_ROLE_NONE = 0,
PHY_ROLE_TARGET = 0x40,
PHY_ROLE_INITIATOR = 0x80,
};
-enum sas_phy_type {
- PHY_TYPE_PHYSICAL,
- PHY_TYPE_VIRTUAL
-};
-
/* The events are mnemonically described in sas_dump.c
* so when updating/adding events here, please also
* update the other file too.
@@ -258,7 +248,6 @@ struct asd_sas_port {
/* public: */
int id;
- enum sas_class class;
u8 sas_addr[SAS_ADDR_SIZE];
u8 attached_sas_addr[SAS_ADDR_SIZE];
enum sas_protocol iproto;
@@ -319,11 +308,9 @@ struct asd_sas_phy {
int enabled; /* must be set */
int id; /* must be set */
- enum sas_class class;
enum sas_protocol iproto;
enum sas_protocol tproto;
- enum sas_phy_type type;
enum sas_phy_role role;
enum sas_oob_mode oob_mode;
enum sas_linkrate linkrate;
@@ -346,11 +333,6 @@ struct asd_sas_phy {
void *lldd_phy; /* not touched by the sas_class_code */
};
-struct scsi_core {
- struct Scsi_Host *shost;
-
-};
-
enum sas_ha_state {
SAS_HA_REGISTERED,
SAS_HA_DRAINING,
@@ -371,12 +353,11 @@ struct sas_ha_struct {
struct mutex disco_mutex;
- struct scsi_core core;
+ struct Scsi_Host *shost;
/* public: */
char *sas_ha_name;
struct device *dev; /* should be set */
- struct module *lldd_module; /* should be set */
struct workqueue_struct *event_q;
struct workqueue_struct *disco_q;
@@ -544,12 +525,9 @@ struct sas_ata_task {
struct host_to_dev_fis fis;
u8 atapi_packet[16]; /* 0 if not ATAPI task */
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 dma_xfer:1; /* PIO:0 or DMA:1 */
u8 use_ncq:1;
- u8 set_affil_pol:1;
- u8 stp_affil_pol:1;
+ u8 return_fis_on_success:1;
u8 device_control_reg_update:1;
@@ -582,12 +560,8 @@ enum task_attribute {
};
struct sas_ssp_task {
- u8 retry_count; /* hardware retry, should be > 0 */
-
u8 LUN[8];
- u8 enable_first_burst:1;
enum task_attribute task_attr;
- u8 task_prio;
struct scsi_cmnd *cmd;
};
@@ -727,8 +701,6 @@ extern struct device_attribute dev_attr_phy_event_threshold;
int sas_discover_root_expander(struct domain_device *);
-void sas_init_ex_attr(void);
-
int sas_ex_revalidate_domain(struct domain_device *);
void sas_unregister_domain_devices(struct asd_sas_port *port, int gone);
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 70b7475dcf56..49f768d0ff37 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -764,12 +764,12 @@ scsi_template_proc_dir(const struct scsi_host_template *sht);
#define scsi_template_proc_dir(sht) NULL
#endif
extern void scsi_scan_host(struct Scsi_Host *);
-extern void scsi_rescan_device(struct device *);
+extern void scsi_rescan_device(struct scsi_device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
-extern struct Scsi_Host *scsi_host_lookup(unsigned short);
+extern struct Scsi_Host *scsi_host_lookup(unsigned int hostnum);
extern const char *scsi_host_state_name(enum scsi_host_state);
extern void scsi_host_complete_all_commands(struct Scsi_Host *shost,
enum scsi_host_status status);
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 34c03707fb6e..fb3399e4cd29 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -472,7 +472,6 @@ extern struct iscsi_iface *iscsi_create_iface(struct Scsi_Host *shost,
uint32_t iface_type,
uint32_t iface_num, int dd_size);
extern void iscsi_destroy_iface(struct iscsi_iface *iface);
-extern struct iscsi_iface *iscsi_lookup_iface(int handle);
extern char *iscsi_get_port_speed_name(struct Scsi_Host *shost);
extern char *iscsi_get_port_state_name(struct Scsi_Host *shost);
extern int iscsi_is_session_dev(const struct device *dev);
diff --git a/include/soc/at91/atmel_tcb.h b/include/soc/at91/atmel_tcb.h
index 1d7071dc0bca..26b56a07bd1f 100644
--- a/include/soc/at91/atmel_tcb.h
+++ b/include/soc/at91/atmel_tcb.h
@@ -77,9 +77,6 @@ struct atmel_tc {
bool allocated;
};
-extern struct atmel_tc *atmel_tc_alloc(unsigned block);
-extern void atmel_tc_free(struct atmel_tc *tc);
-
/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
extern const u8 atmel_tc_divisors[5];
diff --git a/include/soc/imx/revision.h b/include/soc/imx/revision.h
index b2a55dafaf0a..b122d2fc8881 100644
--- a/include/soc/imx/revision.h
+++ b/include/soc/imx/revision.h
@@ -22,6 +22,7 @@
#define IMX_CHIP_REVISION_3_3 0x33
#define IMX_CHIP_REVISION_UNKNOWN 0xff
+int mx25_revision(void);
int mx27_revision(void);
int mx31_revision(void);
int mx35_revision(void);
diff --git a/include/soc/mediatek/smi.h b/include/soc/mediatek/smi.h
index dfd8efca5e60..000eb1cf68b7 100644
--- a/include/soc/mediatek/smi.h
+++ b/include/soc/mediatek/smi.h
@@ -13,6 +13,7 @@
enum iommu_atf_cmd {
IOMMU_ATF_CMD_CONFIG_SMI_LARB, /* For mm master to en/disable iommu */
+ IOMMU_ATF_CMD_CONFIG_INFRA_IOMMU, /* For infra master to enable iommu */
IOMMU_ATF_CMD_MAX,
};
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index d91289c6f00e..bcf872c17dd3 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -148,7 +148,7 @@ struct snd_compr_ops {
*/
struct snd_compr {
const char *name;
- struct device dev;
+ struct device *dev;
struct snd_compr_ops *ops;
void *private_data;
struct snd_card *card;
diff --git a/include/sound/control.h b/include/sound/control.h
index cc3dcc6cfb0f..9a4f4f7138da 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -140,8 +140,32 @@ int snd_ctl_remove_id(struct snd_card * card, struct snd_ctl_elem_id *id);
int snd_ctl_rename_id(struct snd_card * card, struct snd_ctl_elem_id *src_id, struct snd_ctl_elem_id *dst_id);
void snd_ctl_rename(struct snd_card *card, struct snd_kcontrol *kctl, const char *name);
int snd_ctl_activate_id(struct snd_card *card, struct snd_ctl_elem_id *id, int active);
-struct snd_kcontrol *snd_ctl_find_numid(struct snd_card * card, unsigned int numid);
-struct snd_kcontrol *snd_ctl_find_id(struct snd_card * card, struct snd_ctl_elem_id *id);
+struct snd_kcontrol *snd_ctl_find_numid_locked(struct snd_card *card, unsigned int numid);
+struct snd_kcontrol *snd_ctl_find_numid(struct snd_card *card, unsigned int numid);
+struct snd_kcontrol *snd_ctl_find_id_locked(struct snd_card *card, const struct snd_ctl_elem_id *id);
+struct snd_kcontrol *snd_ctl_find_id(struct snd_card *card, const struct snd_ctl_elem_id *id);
+
+/**
+ * snd_ctl_find_id_mixer - find the control instance with the given name string
+ * @card: the card instance
+ * @name: the name string
+ *
+ * Finds the control instance with the given name and the
+ * %SNDRV_CTL_ELEM_IFACE_MIXER interface. All other ID fields are left zero.
+ *
+ * This is merely a wrapper to snd_ctl_find_id().
+ *
+ * Return: The pointer of the instance if found, or %NULL if not.
+ */
+static inline struct snd_kcontrol *
+snd_ctl_find_id_mixer(struct snd_card *card, const char *name)
+{
+ struct snd_ctl_elem_id id = {};
+
+ id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
+ strscpy(id.name, name, sizeof(id.name));
+ return snd_ctl_find_id(card, &id);
+}
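
For illustration, a lookup that previously required filling in a struct
snd_ctl_elem_id by hand reduces to a single call; the control name below is
only an example and 'card' is the usual struct snd_card pointer:

	struct snd_kcontrol *kctl;

	kctl = snd_ctl_find_id_mixer(card, "Master Playback Switch");
	if (!kctl)
		return -ENOENT;
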
int snd_ctl_create(struct snd_card *card);
@@ -238,6 +262,9 @@ snd_ctl_add_follower(struct snd_kcontrol *master, struct snd_kcontrol *follower)
return _snd_ctl_add_follower(master, follower, 0);
}
+int snd_ctl_add_followers(struct snd_card *card, struct snd_kcontrol *master,
+ const char * const *list);
+
/**
* snd_ctl_add_follower_uncached - Add a virtual follower control
* @master: vmaster element
diff --git a/include/sound/core.h b/include/sound/core.h
index f6e0dd648b80..dfef0c9d4b9f 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -96,7 +96,7 @@ struct snd_card {
private data */
struct list_head devices; /* devices */
- struct device ctl_dev; /* control device */
+ struct device *ctl_dev; /* control device */
unsigned int last_numid; /* last used numeric ID */
struct rw_semaphore controls_rwsem; /* controls lock (list and values) */
rwlock_t ctl_files_rwlock; /* ctl_files list lock */
@@ -239,7 +239,7 @@ extern struct dentry *sound_debugfs_root;
void snd_request_card(int card);
-void snd_device_initialize(struct device *dev, struct snd_card *card);
+int snd_device_alloc(struct device **dev_p, struct snd_card *card);
int snd_register_device(int type, struct snd_card *card, int dev,
const struct file_operations *f_ops,
diff --git a/include/sound/cs35l41.h b/include/sound/cs35l41.h
index 7239d943942c..1bf757901d02 100644
--- a/include/sound/cs35l41.h
+++ b/include/sound/cs35l41.h
@@ -829,6 +829,7 @@ enum cs35l41_cspl_mbox_cmd {
CSPL_MBOX_CMD_STOP_PRE_REINIT = 4,
CSPL_MBOX_CMD_HIBERNATE = 5,
CSPL_MBOX_CMD_OUT_OF_HIBERNATE = 6,
+ CSPL_MBOX_CMD_SPK_OUT_ENABLE = 7,
CSPL_MBOX_CMD_UNKNOWN_CMD = -1,
CSPL_MBOX_CMD_INVALID_SEQUENCE = -2,
};
@@ -901,7 +902,7 @@ int cs35l41_exit_hibernate(struct device *dev, struct regmap *regmap);
int cs35l41_init_boost(struct device *dev, struct regmap *regmap,
struct cs35l41_hw_cfg *hw_cfg);
bool cs35l41_safe_reset(struct regmap *regmap, enum cs35l41_boost_type b_type);
-int cs35l41_global_enable(struct regmap *regmap, enum cs35l41_boost_type b_type, int enable,
- struct completion *pll_lock);
+int cs35l41_global_enable(struct device *dev, struct regmap *regmap, enum cs35l41_boost_type b_type,
+ int enable, struct completion *pll_lock, bool firmware_running);
#endif /* __CS35L41_H */
diff --git a/include/sound/cs35l56.h b/include/sound/cs35l56.h
index 1f9713d7ca76..3950322bf3cb 100644
--- a/include/sound/cs35l56.h
+++ b/include/sound/cs35l56.h
@@ -252,15 +252,40 @@
#define CS35L56_NUM_BULK_SUPPLIES 3
#define CS35L56_NUM_DSP_REGIONS 5
+struct cs35l56_base {
+ struct device *dev;
+ struct regmap *regmap;
+ int irq;
+ struct mutex irq_lock;
+ u8 rev;
+ bool init_done;
+ bool fw_patched;
+ bool secured;
+ bool can_hibernate;
+ struct gpio_desc *reset_gpio;
+};
+
extern struct regmap_config cs35l56_regmap_i2c;
extern struct regmap_config cs35l56_regmap_spi;
extern struct regmap_config cs35l56_regmap_sdw;
-extern const struct cs_dsp_region cs35l56_dsp1_regions[CS35L56_NUM_DSP_REGIONS];
extern const char * const cs35l56_tx_input_texts[CS35L56_NUM_INPUT_SRC];
extern const unsigned int cs35l56_tx_input_values[CS35L56_NUM_INPUT_SRC];
-void cs35l56_reread_firmware_registers(struct device *dev, struct regmap *regmap);
+int cs35l56_set_patch(struct cs35l56_base *cs35l56_base);
+int cs35l56_mbox_send(struct cs35l56_base *cs35l56_base, unsigned int command);
+int cs35l56_firmware_shutdown(struct cs35l56_base *cs35l56_base);
+int cs35l56_wait_for_firmware_boot(struct cs35l56_base *cs35l56_base);
+void cs35l56_wait_control_port_ready(void);
+void cs35l56_wait_min_reset_pulse(void);
+void cs35l56_system_reset(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+int cs35l56_irq_request(struct cs35l56_base *cs35l56_base, int irq);
+irqreturn_t cs35l56_irq(int irq, void *data);
+int cs35l56_is_fw_reload_needed(struct cs35l56_base *cs35l56_base);
+int cs35l56_runtime_suspend_common(struct cs35l56_base *cs35l56_base);
+int cs35l56_runtime_resume_common(struct cs35l56_base *cs35l56_base, bool is_soundwire);
+void cs35l56_init_cs_dsp(struct cs35l56_base *cs35l56_base, struct cs_dsp *cs_dsp);
+int cs35l56_hw_init(struct cs35l56_base *cs35l56_base);
int cs35l56_get_bclk_freq_id(unsigned int freq);
void cs35l56_fill_supply_names(struct regulator_bulk_data *data);
diff --git a/include/sound/cs42l43.h b/include/sound/cs42l43.h
new file mode 100644
index 000000000000..deb337fc4e8c
--- /dev/null
+++ b/include/sound/cs42l43.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CS42L43 CODEC driver external data
+ *
+ * Copyright (C) 2022-2023 Cirrus Logic, Inc. and
+ * Cirrus Logic International Semiconductor Ltd.
+ */
+
+#ifndef CS42L43_ASOC_EXT_H
+#define CS42L43_ASOC_EXT_H
+
+#define CS42L43_SYSCLK 0
+
+#define CS42L43_SYSCLK_MCLK 0
+#define CS42L43_SYSCLK_SDW 1
+
+#endif /* CS42L43_ASOC_EXT_H */
diff --git a/include/sound/designware_i2s.h b/include/sound/designware_i2s.h
index 80d275b9ae0d..f6803205a9fb 100644
--- a/include/sound/designware_i2s.h
+++ b/include/sound/designware_i2s.h
@@ -21,6 +21,8 @@ struct i2s_clk_config_data {
u32 sample_rate;
};
+struct dw_i2s_dev;
+
struct i2s_platform_data {
#define DWC_I2S_PLAY (1 << 0)
#define DWC_I2S_RECORD (1 << 1)
@@ -42,6 +44,7 @@ struct i2s_platform_data {
void *capture_dma_data;
bool (*filter)(struct dma_chan *chan, void *slave);
int (*i2s_clk_cfg)(struct i2s_clk_config_data *config);
+ int (*i2s_pd_init)(struct dw_i2s_dev *dev);
};
struct i2s_dma_data {
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index 2df54cf02cb3..d70c55f17df7 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -142,7 +142,7 @@ struct snd_dmaengine_pcm_config {
struct snd_pcm_substream *substream);
int (*process)(struct snd_pcm_substream *substream,
int channel, unsigned long hwoff,
- void *buf, unsigned long bytes);
+ unsigned long bytes);
dma_filter_fn compat_filter_fn;
struct device *dma_dev;
const char *chan_names[SNDRV_PCM_STREAM_LAST + 1];
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h
index 386a5f3be3e0..1af9e6819392 100644
--- a/include/sound/emu10k1.h
+++ b/include/sound/emu10k1.h
@@ -902,6 +902,11 @@ SUB_REG_NC(A_EHC, A_I2S_CAPTURE_RATE, 0x00000e00) /* This sets the capture PCM
#define A_TTDA 0x7a /* Tank Table DMA Address */
#define A_TTDD 0x7b /* Tank Table DMA Data */
+// In A_FXRT1 & A_FXRT2, the 0x80 bit of each byte completely disables the
+// filter (CVCF_CURRENTFILTER) for the corresponding channel. There is no
+// effect on the volume (CVCF_CURRENTVOLUME) or the interpolator's filter
+// (CCCA_INTERPROM_MASK).
+
#define A_FXRT2 0x7c
#define A_FXRT_CHANNELE 0x0000003f /* Effects send bus number for channel's effects send E */
#define A_FXRT_CHANNELF 0x00003f00 /* Effects send bus number for channel's effects send F */
@@ -914,8 +919,6 @@ SUB_REG_NC(A_EHC, A_I2S_CAPTURE_RATE, 0x00000e00) /* This sets the capture PCM
#define A_FXSENDAMOUNT_G_MASK 0x0000FF00
#define A_FXSENDAMOUNT_H_MASK 0x000000FF
-/* 0x7c, 0x7e "high bit is used for filtering" */
-
/* The send amounts for this one are the same as used with the emu10k1 */
#define A_FXRT1 0x7e
#define A_FXRT_CHANNELA 0x0000003f
@@ -992,6 +995,9 @@ SUB_REG_NC(A_EHC, A_I2S_CAPTURE_RATE, 0x00000e00) /* This sets the capture PCM
#define EMU_HANA_WCLOCK_4X 0x10
#define EMU_HANA_WCLOCK_MULT_RESERVED 0x18
+// If the selected external clock source is/becomes invalid or incompatible
+// with the clock multiplier, the clock source is reset to this value, and
+// a WCLK_CHANGED interrupt is raised.
#define EMU_HANA_DEFCLOCK 0x06 /* 000000x 1 bits Default Word Clock */
#define EMU_HANA_DEFCLOCK_48K 0x00
#define EMU_HANA_DEFCLOCK_44_1K 0x01
@@ -1523,10 +1529,10 @@ struct snd_emu10k1_pcm_mixer {
((route[0] | (route[1] << 4) | (route[2] << 8) | (route[3] << 12)) << 16)
#define snd_emu10k1_compose_audigy_fxrt1(route) \
-((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24))
+((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24) | 0x80808080)
#define snd_emu10k1_compose_audigy_fxrt2(route) \
-((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24))
+((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24) | 0x80808080)
#define snd_emu10k1_compose_audigy_sendamounts(vol) \
(((unsigned int)vol[4] << 24) | ((unsigned int)vol[5] << 16) | ((unsigned int)vol[6] << 8) | (unsigned int)vol[7])
@@ -1678,8 +1684,8 @@ struct snd_emu1010 {
unsigned int clock_fallback;
unsigned int optical_in; /* 0:SPDIF, 1:ADAT */
unsigned int optical_out; /* 0:SPDIF, 1:ADAT */
- struct delayed_work firmware_work;
- u32 last_reg;
+ struct work_struct firmware_work;
+ struct work_struct clock_work;
};
struct snd_emu10k1 {
@@ -1754,6 +1760,7 @@ struct snd_emu10k1 {
struct snd_kcontrol *ctl_efx_send_routing;
struct snd_kcontrol *ctl_efx_send_volume;
struct snd_kcontrol *ctl_efx_attn;
+ struct snd_kcontrol *ctl_clock_source;
void (*hwvol_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*capture_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
@@ -1761,6 +1768,7 @@ struct snd_emu10k1 {
void (*capture_efx_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*spdif_interrupt)(struct snd_emu10k1 *emu, unsigned int status);
void (*dsp_interrupt)(struct snd_emu10k1 *emu);
+ void (*gpio_interrupt)(struct snd_emu10k1 *emu);
void (*p16v_interrupt)(struct snd_emu10k1 *emu);
struct snd_pcm_substream *pcm_capture_substream;
diff --git a/include/sound/hda-mlink.h b/include/sound/hda-mlink.h
index 4f44f0bd5388..228114aca415 100644
--- a/include/sound/hda-mlink.h
+++ b/include/sound/hda-mlink.h
@@ -42,6 +42,7 @@ int hdac_bus_eml_power_down_unlocked(struct hdac_bus *bus, bool alt, int elid, i
int hdac_bus_eml_sdw_power_up_unlocked(struct hdac_bus *bus, int sublink);
int hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink);
+int hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid);
int hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num);
int hdac_bus_eml_sdw_map_stream_ch(struct hdac_bus *bus, int sublink, int y,
@@ -146,6 +147,9 @@ static inline int
hdac_bus_eml_sdw_power_down_unlocked(struct hdac_bus *bus, int sublink) { return 0; }
static inline int
+hdac_bus_eml_sdw_get_lsdiid_unlocked(struct hdac_bus *bus, int sublink, u16 *lsdiid) { return 0; }
+
+static inline int
hdac_bus_eml_sdw_set_lsdiid(struct hdac_bus *bus, int sublink, int dev_num) { return 0; }
static inline int
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index bbb7805e85d8..5497dc9c396a 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -18,9 +18,6 @@
#include <sound/hda_verbs.h>
#include <sound/hda_regmap.h>
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_CFL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa348)
-
/*
* Structures
*/
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 2ffdf58bd6d4..32c59053b48e 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -11,6 +11,7 @@
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
+#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -704,4 +705,29 @@ static inline unsigned int snd_array_index(struct snd_array *array, void *ptr)
for ((idx) = 0, (ptr) = (array)->list; (idx) < (array)->used; \
(ptr) = snd_array_elem(array, ++(idx)))
+/*
+ * Device matching
+ */
+
+#define HDA_CONTROLLER_IS_HSW(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_0) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_2) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_HSW_3) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_BDW) }, \
+ { } \
+ }, pci))
+
+#define HDA_CONTROLLER_IS_APL(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_APL) }, \
+ { } \
+ }, pci))
+
+#define HDA_CONTROLLER_IN_GPU(pci) (pci_match_id((struct pci_device_id []){ \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG1) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_0) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_1) }, \
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HDA_DG2_2) }, \
+ { } \
+ }, pci) || HDA_CONTROLLER_IS_HSW(pci))
+
#endif /* __SOUND_HDAUDIO_H */
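
A minimal sketch of how a controller driver might consume these matchers is
shown below; the function and the policy of skipping the matched devices are
assumptions for illustration only, not taken from an existing driver.

	static bool my_hda_wants_device(struct pci_dev *pci)
	{
		/* Example policy only: skip controllers embedded in a GPU
		 * and Apollo Lake parts, accept everything else. */
		if (HDA_CONTROLLER_IN_GPU(pci) || HDA_CONTROLLER_IS_APL(pci))
			return false;

		return true;
	}
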
diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
index 8d6cdb254039..b0da633184cd 100644
--- a/include/sound/hwdep.h
+++ b/include/sound/hwdep.h
@@ -53,7 +53,7 @@ struct snd_hwdep {
wait_queue_head_t open_wait;
void *private_data;
void (*private_free) (struct snd_hwdep *hwdep);
- struct device dev;
+ struct device *dev;
struct mutex open_mutex;
int used; /* reference counter */
diff --git a/include/sound/info.h b/include/sound/info.h
index 7c13bf52cc81..adbc506860d6 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -118,8 +118,6 @@ struct snd_info_entry *snd_info_create_card_entry(struct snd_card *card,
const char *name,
struct snd_info_entry *parent);
void snd_info_free_entry(struct snd_info_entry *entry);
-int snd_info_store_text(struct snd_info_entry *entry);
-int snd_info_restore_text(struct snd_info_entry *entry);
int snd_info_card_create(struct snd_card *card);
int snd_info_card_register(struct snd_card *card);
diff --git a/include/sound/l3.h b/include/sound/l3.h
deleted file mode 100644
index b6f58072237a..000000000000
--- a/include/sound/l3.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _L3_H_
-#define _L3_H_ 1
-
-struct l3_pins {
- void (*setdat)(struct l3_pins *, int);
- void (*setclk)(struct l3_pins *, int);
- void (*setmode)(struct l3_pins *, int);
-
- int gpio_data;
- int gpio_clk;
- int gpio_mode;
- int use_gpios;
-
- int data_hold;
- int data_setup;
- int clock_high;
- int mode_hold;
- int mode;
- int mode_setup;
-};
-
-struct device;
-
-int l3_write(struct l3_pins *adap, u8 addr, u8 *data, int len);
-int l3_set_gpio_ops(struct device *dev, struct l3_pins *adap);
-
-#endif
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 19f564606ac4..2a815373dac1 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -16,6 +16,7 @@
#include <linux/bitops.h>
#include <linux/pm_qos.h>
#include <linux/refcount.h>
+#include <linux/uio.h>
#define snd_pcm_substream_chip(substream) ((substream)->private_data)
#define snd_pcm_chip(pcm) ((pcm)->private_data)
@@ -68,11 +69,8 @@ struct snd_pcm_ops {
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*fill_silence)(struct snd_pcm_substream *substream, int channel,
unsigned long pos, unsigned long bytes);
- int (*copy_user)(struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void __user *buf,
- unsigned long bytes);
- int (*copy_kernel)(struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void *buf, unsigned long bytes);
+ int (*copy)(struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, struct iov_iter *iter, unsigned long bytes);
struct page *(*page)(struct snd_pcm_substream *substream,
unsigned long offset);
int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
@@ -510,7 +508,7 @@ struct snd_pcm_str {
#endif
#endif
struct snd_kcontrol *chmap_kctl; /* channel-mapping controls */
- struct device dev;
+ struct device *dev;
};
struct snd_pcm {
@@ -1556,6 +1554,11 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
#define pcm_dbg(pcm, fmt, args...) \
dev_dbg((pcm)->card->dev, fmt, ##args)
+/* helpers for copying between iov_iter and iomem */
+int copy_to_iter_fromio(struct iov_iter *iter, const void __iomem *src,
+ size_t count);
+int copy_from_iter_toio(void __iomem *dst, struct iov_iter *iter, size_t count);
+
struct snd_pcm_status64 {
snd_pcm_state_t state; /* stream state */
u8 rsvd[4];
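
To show how the iov_iter helpers above pair with the unified copy callback in
struct snd_pcm_ops, here is a minimal sketch for a device whose samples live in
an iomem buffer; the chip structure and its iobuf field are hypothetical.

	struct my_chip {
		void __iomem *iobuf;	/* hypothetical mapped sample memory */
	};

	static int my_pcm_copy(struct snd_pcm_substream *substream, int channel,
			       unsigned long pos, struct iov_iter *iter,
			       unsigned long bytes)
	{
		struct my_chip *chip = snd_pcm_substream_chip(substream);

		/* Direction follows the substream: push data to the device for
		 * playback, pull it back for capture. */
		if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
			return copy_from_iter_toio(chip->iobuf + pos, iter, bytes);

		return copy_to_iter_fromio(iter, chip->iobuf + pos, bytes);
	}
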
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index b0197b1d1fe4..f31cabf0158c 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -135,7 +135,7 @@ struct snd_rawmidi {
struct mutex open_mutex;
wait_queue_head_t open_wait;
- struct device dev;
+ struct device *dev;
struct snd_info_entry *proc_entry;
diff --git a/include/sound/rt5665.h b/include/sound/rt5665.h
index 3b3d6a19ca49..e865f041929b 100644
--- a/include/sound/rt5665.h
+++ b/include/sound/rt5665.h
@@ -31,8 +31,6 @@ struct rt5665_platform_data {
bool in3_diff;
bool in4_diff;
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5665_dmic1_data_pin dmic1_data_pin;
enum rt5665_dmic2_data_pin dmic2_data_pin;
enum rt5665_jd_src jd_src;
diff --git a/include/sound/rt5668.h b/include/sound/rt5668.h
index 182edfbc9e7a..b682418c6cd6 100644
--- a/include/sound/rt5668.h
+++ b/include/sound/rt5668.h
@@ -25,9 +25,6 @@ enum rt5668_jd_src {
};
struct rt5668_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5668_dmic1_data_pin dmic1_data_pin;
enum rt5668_dmic1_clk_pin dmic1_clk_pin;
enum rt5668_jd_src jd_src;
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index 3900a07e3935..4256df721e3a 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -31,9 +31,6 @@ enum rt5682_dai_clks {
};
struct rt5682_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5682_dmic1_data_pin dmic1_data_pin;
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
diff --git a/include/sound/rt5682s.h b/include/sound/rt5682s.h
index f18d91308b9a..66ca0c75b914 100644
--- a/include/sound/rt5682s.h
+++ b/include/sound/rt5682s.h
@@ -32,9 +32,6 @@ enum rt5682s_dai_clks {
};
struct rt5682s_platform_data {
-
- int ldo1_en; /* GPIO for LDO1_EN */
-
enum rt5682s_dmic1_data_pin dmic1_data_pin;
enum rt5682s_dmic1_clk_pin dmic1_clk_pin;
enum rt5682s_jd_src jd_src;
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index b450d5873227..d1a95bc33c56 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -192,9 +192,8 @@ int asoc_simple_remove(struct platform_device *pdev);
int asoc_graph_card_probe(struct snd_soc_card *card);
int asoc_graph_is_ports0(struct device_node *port);
-int asoc_graph_parse_dai(struct device_node *ep,
- struct snd_soc_dai_link_component *dlc,
- int *is_single_link);
+int asoc_graph_parse_dai(struct device *dev, struct device_node *ep,
+ struct snd_soc_dai_link_component *dlc, int *is_single_link);
#ifdef DEBUG
static inline void asoc_simple_debug_dai(struct asoc_simple_priv *priv,
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 528279056b3a..6d31d535e8f6 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -9,6 +9,7 @@
#include <linux/stddef.h>
#include <linux/acpi.h>
#include <linux/mod_devicetable.h>
+#include <linux/soundwire/sdw.h>
struct snd_soc_acpi_package_context {
char *name; /* package name */
@@ -150,6 +151,7 @@ struct snd_soc_acpi_link_adr {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @uid: ACPI Unique ID, can be used to disambiguate matches.
* @comp_ids: list of compatible audio codecs using the same machine driver,
* firmware and topology
* @link_mask: describes required board layout, e.g. for SoundWire.
@@ -208,4 +210,9 @@ static inline bool snd_soc_acpi_sof_parent(struct device *dev)
!strncmp(dev->parent->driver->name, "sof-audio-acpi", strlen("sof-audio-acpi"));
}
+bool snd_soc_acpi_sdw_link_slaves_found(struct device *dev,
+ const struct snd_soc_acpi_link_adr *link,
+ struct sdw_extended_slave_id *ids,
+ int num_slaves);
+
#endif
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 87f248a06271..ceca69b46a82 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -137,10 +137,10 @@ struct snd_soc_component_driver {
struct timespec64 *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
- int (*copy_user)(struct snd_soc_component *component,
- struct snd_pcm_substream *substream, int channel,
- unsigned long pos, void __user *buf,
- unsigned long bytes);
+ int (*copy)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, struct iov_iter *iter,
+ unsigned long bytes);
struct page *(*page)(struct snd_soc_component *component,
struct snd_pcm_substream *substream,
unsigned long offset);
@@ -509,9 +509,9 @@ int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
-int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
- int channel, unsigned long pos,
- void __user *buf, unsigned long bytes);
+int snd_soc_pcm_component_copy(struct snd_pcm_substream *substream,
+ int channel, unsigned long pos,
+ struct iov_iter *iter, unsigned long bytes);
struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e3906ecda740..5fcfba47d98c 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -271,7 +271,18 @@ int snd_soc_dai_compr_get_metadata(struct snd_soc_dai *dai,
struct snd_compr_stream *cstream,
struct snd_compr_metadata *metadata);
+const char *snd_soc_dai_name_get(struct snd_soc_dai *dai);
+
struct snd_soc_dai_ops {
+ /* DAI driver callbacks */
+ int (*probe)(struct snd_soc_dai *dai);
+ int (*remove)(struct snd_soc_dai *dai);
+ /* compress dai */
+ int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
+ /* Optional Callback used at pcm creation*/
+ int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_soc_dai *dai);
+
/*
* DAI clocking configuration, all optional.
* Called by soc_card drivers, normally in their hw_params.
@@ -353,6 +364,10 @@ struct snd_soc_dai_ops {
u64 *auto_selectable_formats;
int num_auto_selectable_formats;
+ /* probe ordering - for components with runtime dependencies */
+ int probe_order;
+ int remove_order;
+
/* bit field */
unsigned int no_capture_mute:1;
};
@@ -397,15 +412,7 @@ struct snd_soc_dai_driver {
unsigned int id;
unsigned int base;
struct snd_soc_dobj dobj;
-
- /* DAI driver callbacks */
- int (*probe)(struct snd_soc_dai *dai);
- int (*remove)(struct snd_soc_dai *dai);
- /* compress dai */
- int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
- /* Optional Callback used at pcm creation*/
- int (*pcm_new)(struct snd_soc_pcm_runtime *rtd,
- struct snd_soc_dai *dai);
+ struct of_phandle_args *dai_args;
/* ops */
const struct snd_soc_dai_ops *ops;
@@ -417,10 +424,6 @@ struct snd_soc_dai_driver {
unsigned int symmetric_rate:1;
unsigned int symmetric_channels:1;
unsigned int symmetric_sample_bits:1;
-
- /* probe ordering - for components with runtime dependencies */
- int probe_order;
- int remove_order;
};
/* for Playback/Capture */
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 87f8e1793af1..d2faec9a323e 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -42,36 +42,45 @@ struct soc_enum;
/* codec domain */
#define SND_SOC_DAPM_VMID(wname) \
-{ .id = snd_soc_dapm_vmid, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_vmid, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0}
/* platform domain */
#define SND_SOC_DAPM_SIGGEN(wname) \
-{ .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_siggen, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_SINK(wname) \
-{ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_sink, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_INPUT(wname) \
-{ .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_input, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_OUTPUT(wname) \
-{ .id = snd_soc_dapm_output, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_output, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM }
#define SND_SOC_DAPM_MIC(wname, wevent) \
-{ .id = snd_soc_dapm_mic, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mic, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
#define SND_SOC_DAPM_HP(wname, wevent) \
-{ .id = snd_soc_dapm_hp, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_hp, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_SPK(wname, wevent) \
-{ .id = snd_soc_dapm_spk, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_spk, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_LINE(wname, wevent) \
-{ .id = snd_soc_dapm_line, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_line, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD}
@@ -82,93 +91,110 @@ struct soc_enum;
/* path domain */
#define SND_SOC_DAPM_PGA(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_OUT_DRV(wname, wreg, wshift, winvert,\
wcontrols, wncontrols) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_out_drv, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
#define SND_SOC_DAPM_MIXER_NAMED_CTL(wname, wreg, wshift, winvert, \
wcontrols, wncontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols}
/* DEPRECATED: use SND_SOC_DAPM_SUPPLY */
#define SND_SOC_DAPM_MICBIAS(wname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_micbias, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_micbias, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = NULL, .num_kcontrols = 0}
#define SND_SOC_DAPM_SWITCH(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_switch, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_switch, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_MUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_mux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
#define SND_SOC_DAPM_DEMUX(wname, wreg, wshift, winvert, wcontrols) \
-{ .id = snd_soc_dapm_demux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_demux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_ARRAY(wname, wreg, wshift, winvert,\
wcontrols) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
#define SOC_MIXER_NAMED_CTL_ARRAY(wname, wreg, wshift, winvert, \
wcontrols)\
-{ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer_named_ctl, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols)}
/* path domain with event - event handler must return 0 for success */
#define SND_SOC_DAPM_PGA_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_OUT_DRV_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_out_drv, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_out_drv, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_E(wname, wreg, wshift, winvert, wcontrols, \
wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = wncontrols, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MIXER_NAMED_CTL_E(wname, wreg, wshift, winvert, \
wcontrols, wncontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, \
.num_kcontrols = wncontrols, .event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_SWITCH_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_switch, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_switch, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_MUX_E(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mux, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mux, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = 1, \
.event = wevent, .event_flags = wflags}
@@ -176,101 +202,121 @@ struct soc_enum;
/* additional sequencing control within an event type */
#define SND_SOC_DAPM_PGA_S(wname, wsubseq, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags, \
.subseq = wsubseq}
#define SND_SOC_DAPM_SUPPLY_S(wname, wsubseq, wreg, wshift, winvert, wevent, \
wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_supply, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags, .subseq = wsubseq}
/* Simplified versions of above macros, assuming wncontrols = ARRAY_SIZE(wcontrols) */
#define SOC_PGA_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_pga, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pga, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_E_ARRAY(wname, wreg, wshift, winvert, wcontrols, \
wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
#define SOC_MIXER_NAMED_CTL_E_ARRAY(wname, wreg, wshift, winvert, \
wcontrols, wevent, wflags) \
-{ .id = snd_soc_dapm_mixer, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_mixer, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.kcontrol_news = wcontrols, .num_kcontrols = ARRAY_SIZE(wcontrols), \
.event = wevent, .event_flags = wflags}
/* events that are pre and post DAPM */
#define SND_SOC_DAPM_PRE(wname, wevent) \
-{ .id = snd_soc_dapm_pre, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pre, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD}
#define SND_SOC_DAPM_POST(wname, wevent) \
-{ .id = snd_soc_dapm_post, .name = wname, .kcontrol_news = NULL, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_post, .name = wname, .kcontrol_news = NULL, \
.num_kcontrols = 0, .reg = SND_SOC_NOPM, .event = wevent, \
.event_flags = SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD}
/* stream domain */
#define SND_SOC_DAPM_AIF_IN(wname, stname, wchan, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_IN_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_in, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_AIF_OUT(wname, stname, wchan, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_AIF_OUT_E(wname, stname, wchan, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_aif_out, .name = wname, .sname = stname, \
.channel = wchan, SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags }
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert) }
#define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), }
#define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
wevent, wflags) \
-{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_CLOCK_SUPPLY(wname) \
-{ .id = snd_soc_dapm_clock_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_clock_supply, .name = wname, \
.reg = SND_SOC_NOPM, .event = dapm_clock_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD }
/* generic widgets */
#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \
-{ .id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \
+(struct snd_soc_dapm_widget) { \
+ .id = wid, .name = wname, .kcontrol_news = NULL, .num_kcontrols = 0, \
.reg = wreg, .shift = wshift, .mask = wmask, \
.on_val = won_val, .off_val = woff_val, }
#define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
-{ .id = snd_soc_dapm_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_supply, .name = wname, \
SND_SOC_DAPM_INIT_REG_VAL(wreg, wshift, winvert), \
.event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_REGULATOR_SUPPLY(wname, wdelay, wflags) \
-{ .id = snd_soc_dapm_regulator_supply, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_regulator_supply, .name = wname, \
.reg = SND_SOC_NOPM, .shift = wdelay, .event = dapm_regulator_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD, \
.on_val = wflags}
#define SND_SOC_DAPM_PINCTRL(wname, active, sleep) \
-{ .id = snd_soc_dapm_pinctrl, .name = wname, \
+(struct snd_soc_dapm_widget) { \
+ .id = snd_soc_dapm_pinctrl, .name = wname, \
.priv = (&(struct snd_soc_dapm_pinctrl_priv) \
{ .active_state = active, .sleep_state = sleep,}), \
.reg = SND_SOC_NOPM, .event = dapm_pinctrl_event, \
@@ -604,6 +650,7 @@ struct snd_soc_dapm_widget {
unsigned char power_checked:1; /* power checked this run */
unsigned char is_supply:1; /* Widget is a supply type widget */
unsigned char is_ep:2; /* Widget is an endpoint type widget */
+ unsigned char no_wname_in_kcontrol_name:1; /* No widget name prefix in kcontrol name */
int subseq; /* sort within widget type */
int (*power_check)(struct snd_soc_dapm_widget *w);
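All of the widget helpers in the hunks above change from a bare brace-enclosed initializer to a compound literal of type struct snd_soc_dapm_widget, which keeps them usable in static widget tables (via the GNU C extension that allows compound literals in static initializers) and additionally makes them valid in plain expression context. A minimal sketch with made-up widget names and register/shift values, not taken from any real driver:

/* Hypothetical codec-driver fragment; registers and shifts are illustrative. */
static const struct snd_soc_dapm_widget example_widgets[] = {
	SND_SOC_DAPM_ADC("ADC", "Capture", 0x10, 4, 0),
	SND_SOC_DAPM_DAC("DAC", "Playback", 0x11, 4, 0),
	SND_SOC_DAPM_SUPPLY("MICVDD", 0x12, 0, 0, NULL, 0),
};

/* With the compound-literal form the same macro can also initialize or
 * assign a single widget directly. */
struct snd_soc_dapm_widget adc =
	SND_SOC_DAPM_ADC("ADC", "Capture", 0x10, 4, 0);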
diff --git a/include/sound/soc.h b/include/sound/soc.h
index b27f84580c5b..fa2337a3cf4c 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -651,6 +651,7 @@ struct snd_soc_dai_link_component {
const char *name;
struct device_node *of_node;
const char *dai_name;
+ struct of_phandle_args *dai_args;
};
struct snd_soc_dai_link_codec_ch_map {
@@ -1335,6 +1336,11 @@ int snd_soc_add_pcm_runtimes(struct snd_soc_card *card,
void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
struct snd_soc_pcm_runtime *rtd);
+void snd_soc_dlc_use_cpu_as_platform(struct snd_soc_dai_link_component *platforms,
+ struct snd_soc_dai_link_component *cpus);
+struct of_phandle_args *snd_soc_copy_dai_args(struct device *dev,
+ struct of_phandle_args *args);
+struct snd_soc_dai *snd_soc_get_dai_via_args(struct of_phandle_args *dai_args);
struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
struct snd_soc_dai_driver *dai_drv,
bool legacy_dai_naming);
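The dai_args member added above, together with these helpers, lets a DAI be matched by its full DT phandle arguments rather than by name. A hedged sketch of how a machine driver might use them; np, dev and link are assumptions, and the "sound-dai" property names are conventional rather than mandated by this patch:

struct of_phandle_args args;
struct snd_soc_dai *dai;

if (of_parse_phandle_with_args(np, "sound-dai", "#sound-dai-cells", 0, &args))
	return -EINVAL;

dai = snd_soc_get_dai_via_args(&args);
if (!dai)
	return -EPROBE_DEFER;

link->cpus->dai_args = snd_soc_copy_dai_args(dev, &args);
/* Let the platform component simply mirror the CPU component. */
snd_soc_dlc_use_cpu_as_platform(link->platforms, link->cpus);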
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index 88560281d420..906e2f327ad2 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -26,9 +26,9 @@ enum sof_comp_type {
SOF_COMP_MIXER,
SOF_COMP_MUX,
SOF_COMP_SRC,
- SOF_COMP_SPLITTER,
+ SOF_COMP_DEPRECATED0, /* Formerly SOF_COMP_SPLITTER */
SOF_COMP_TONE,
- SOF_COMP_SWITCH,
+ SOF_COMP_DEPRECATED1, /* Formerly SOF_COMP_SWITCH */
SOF_COMP_BUFFER,
SOF_COMP_EQ_IIR,
SOF_COMP_EQ_FIR,
diff --git a/include/sound/uda134x.h b/include/sound/uda134x.h
deleted file mode 100644
index db82516da162..000000000000
--- a/include/sound/uda134x.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * uda134x.h -- UDA134x ALSA SoC Codec driver
- *
- * Copyright 2007 Dension Audio Systems Ltd.
- * Author: Zoltan Devai
- */
-
-#ifndef _UDA134X_H
-#define _UDA134X_H
-
-#include <sound/l3.h>
-
-struct uda134x_platform_data {
- struct l3_pins l3;
- void (*power) (int);
- int model;
-#define UDA134X_UDA1340 1
-#define UDA134X_UDA1341 2
-#define UDA134X_UDA1344 3
-#define UDA134X_UDA1345 4
-};
-
-#endif /* _UDA134X_H */
diff --git a/include/sound/ump.h b/include/sound/ump.h
index 44d2c2fd021d..91238dabe307 100644
--- a/include/sound/ump.h
+++ b/include/sound/ump.h
@@ -45,6 +45,7 @@ struct snd_ump_endpoint {
spinlock_t legacy_locks[2];
struct snd_rawmidi *legacy_rmidi;
struct snd_rawmidi_substream *legacy_substreams[2][SNDRV_UMP_MAX_GROUPS];
+ unsigned char legacy_mapping[SNDRV_UMP_MAX_GROUPS];
/* for legacy output; need to open the actual substream unlike input */
int legacy_out_opens;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 4c15420e8965..60af7c63b34e 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -50,9 +50,6 @@ struct sock;
#define TA_LOGIN_TIMEOUT 15
#define TA_LOGIN_TIMEOUT_MAX 30
#define TA_LOGIN_TIMEOUT_MIN 5
-#define TA_NETIF_TIMEOUT 2
-#define TA_NETIF_TIMEOUT_MAX 15
-#define TA_NETIF_TIMEOUT_MIN 2
#define TA_GENERATE_NODE_ACLS 0
#define TA_DEFAULT_CMDSN_DEPTH 64
#define TA_DEFAULT_CMDSN_DEPTH_MAX 512
@@ -773,7 +770,6 @@ to_iscsi_nacl(struct se_node_acl *se_nacl)
struct iscsi_tpg_attrib {
u32 authentication;
u32 login_timeout;
- u32 netif_timeout;
u32 generate_node_acls;
u32 cache_dynamic_acls;
u32 default_cmdsn_depth;
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index 1f7fc1fc590c..e609cd7da47e 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -12,6 +12,8 @@
#undef __perf_task
#define __perf_task(t) (t)
+#include <linux/args.h>
+
/* cast any integer, pointer, or small struct to u64 */
#define UINTTYPE(size) \
__typeof__(__builtin_choose_expr(size == 1, (u8)1, \
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 40e60c33cc6f..0e128ad51460 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -12,6 +12,7 @@
#define RWBS_LEN 8
+#ifdef CONFIG_BUFFER_HEAD
DECLARE_EVENT_CLASS(block_buffer,
TP_PROTO(struct buffer_head *bh),
@@ -61,6 +62,7 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_ARGS(bh)
);
+#endif /* CONFIG_BUFFER_HEAD */
/**
* block_rq_requeue - place block IO request back on a queue
diff --git a/include/trace/events/dlm.h b/include/trace/events/dlm.h
index 2b09574e1243..c1a146f9fc91 100644
--- a/include/trace/events/dlm.h
+++ b/include/trace/events/dlm.h
@@ -7,6 +7,7 @@
#include <linux/dlm.h>
#include <linux/dlmconstants.h>
+#include <uapi/linux/dlm_plock.h>
#include <linux/tracepoint.h>
#include "../../../fs/dlm/dlm_internal.h"
@@ -585,6 +586,56 @@ TRACE_EVENT(dlm_recv_message,
);
+DECLARE_EVENT_CLASS(dlm_plock_template,
+
+ TP_PROTO(const struct dlm_plock_info *info),
+
+ TP_ARGS(info),
+
+ TP_STRUCT__entry(
+ __field(uint8_t, optype)
+ __field(uint8_t, ex)
+ __field(uint8_t, wait)
+ __field(uint8_t, flags)
+ __field(uint32_t, pid)
+ __field(int32_t, nodeid)
+ __field(int32_t, rv)
+ __field(uint32_t, fsid)
+ __field(uint64_t, number)
+ __field(uint64_t, start)
+ __field(uint64_t, end)
+ __field(uint64_t, owner)
+ ),
+
+ TP_fast_assign(
+ __entry->optype = info->optype;
+ __entry->ex = info->ex;
+ __entry->wait = info->wait;
+ __entry->flags = info->flags;
+ __entry->pid = info->pid;
+ __entry->nodeid = info->nodeid;
+ __entry->rv = info->rv;
+ __entry->fsid = info->fsid;
+ __entry->number = info->number;
+ __entry->start = info->start;
+ __entry->end = info->end;
+ __entry->owner = info->owner;
+ ),
+
+ TP_printk("fsid=%u number=%llx owner=%llx optype=%d ex=%d wait=%d flags=%x pid=%u nodeid=%d rv=%d start=%llx end=%llx",
+ __entry->fsid, __entry->number, __entry->owner,
+ __entry->optype, __entry->ex, __entry->wait,
+ __entry->flags, __entry->pid, __entry->nodeid,
+ __entry->rv, __entry->start, __entry->end)
+
+);
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_read,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
+DEFINE_EVENT(dlm_plock_template, dlm_plock_write,
+ TP_PROTO(const struct dlm_plock_info *info), TP_ARGS(info));
+
TRACE_EVENT(dlm_send,
TP_PROTO(int nodeid, int ret),
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index c9a72e8432b8..5ff15126ad9d 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -122,6 +122,37 @@ TRACE_EVENT(fsi_master_break,
)
);
+TRACE_EVENT(fsi_master_scan,
+ TP_PROTO(const struct fsi_master *master, bool scan),
+ TP_ARGS(master, scan),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ __field(bool, scan)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ __entry->scan = scan;
+ ),
+ TP_printk("fsi%d (%d links) %s", __entry->master_idx, __entry->n_links,
+ __entry->scan ? "scan" : "unscan")
+);
+
+TRACE_EVENT(fsi_master_unregister,
+ TP_PROTO(const struct fsi_master *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, n_links)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->idx;
+ __entry->n_links = master->n_links;
+ ),
+ TP_printk("fsi%d (%d links)", __entry->master_idx, __entry->n_links)
+);
+
TRACE_EVENT(fsi_slave_init,
TP_PROTO(const struct fsi_slave *slave),
TP_ARGS(slave),
diff --git a/include/trace/events/fsi_master_i2cr.h b/include/trace/events/fsi_master_i2cr.h
new file mode 100644
index 000000000000..c33eba130049
--- /dev/null
+++ b/include/trace/events/fsi_master_i2cr.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_i2cr
+
+#if !defined(_TRACE_FSI_MASTER_I2CR_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_I2CR_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(i2cr_i2c_error,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, int rc),
+ TP_ARGS(client, command, rc),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __field(int, rc)
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ __entry->rc = rc;
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } rc:%d", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, __entry->rc)
+);
+
+TRACE_EVENT(i2cr_read,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t *data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+TRACE_EVENT(i2cr_status,
+ TP_PROTO(const struct i2c_client *client, uint64_t status),
+ TP_ARGS(client, status),
+ TP_STRUCT__entry(
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x %016llx", __entry->bus, __entry->addr, __entry->status)
+);
+
+TRACE_EVENT(i2cr_status_error,
+ TP_PROTO(const struct i2c_client *client, uint64_t status, uint64_t error, uint64_t log),
+ TP_ARGS(client, status, error, log),
+ TP_STRUCT__entry(
+ __field(uint64_t, error)
+ __field(uint64_t, log)
+ __field(uint64_t, status)
+ __field(int, bus)
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->error = error;
+ __entry->log = log;
+ __entry->status = status;
+ __entry->bus = client->adapter->nr;
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x status:%016llx error:%016llx log:%016llx", __entry->bus, __entry->addr,
+ __entry->status, __entry->error, __entry->log)
+);
+
+TRACE_EVENT(i2cr_write,
+ TP_PROTO(const struct i2c_client *client, uint32_t command, uint64_t data),
+ TP_ARGS(client, command, data),
+ TP_STRUCT__entry(
+ __field(int, bus)
+ __array(unsigned char, data, sizeof(uint64_t))
+ __array(unsigned char, command, sizeof(uint32_t))
+ __field(unsigned short, addr)
+ ),
+ TP_fast_assign(
+ __entry->bus = client->adapter->nr;
+ memcpy(__entry->data, &data, sizeof(uint64_t));
+ memcpy(__entry->command, &command, sizeof(uint32_t));
+ __entry->addr = client->addr;
+ ),
+ TP_printk("%d-%02x command:{ %*ph } { %*ph }", __entry->bus, __entry->addr,
+ (int)sizeof(uint32_t), __entry->command, (int)sizeof(uint64_t), __entry->data)
+);
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/kyber.h b/include/trace/events/kyber.h
index bf7533f171ff..9d44781efc1c 100644
--- a/include/trace/events/kyber.h
+++ b/include/trace/events/kyber.h
@@ -31,8 +31,8 @@ TRACE_EVENT(kyber_latency,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
- strlcpy(__entry->type, type, sizeof(__entry->type));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->type, type, sizeof(__entry->type));
__entry->percentile = percentile;
__entry->numerator = numerator;
__entry->denominator = denominator;
@@ -59,7 +59,7 @@ TRACE_EVENT(kyber_adjust,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
__entry->depth = depth;
),
@@ -81,7 +81,7 @@ TRACE_EVENT(kyber_throttled,
TP_fast_assign(
__entry->dev = dev;
- strlcpy(__entry->domain, domain, sizeof(__entry->domain));
+ strscpy(__entry->domain, domain, sizeof(__entry->domain));
),
TP_printk("%d,%d %s", MAJOR(__entry->dev), MINOR(__entry->dev),
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index 43711753616a..6beb38c1dcb5 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -1706,7 +1706,7 @@ TRACE_DEFINE_ENUM(SVC_DENIED);
TRACE_DEFINE_ENUM(SVC_PENDING);
TRACE_DEFINE_ENUM(SVC_COMPLETE);
-#define svc_show_status(status) \
+#define show_svc_auth_status(status) \
__print_symbolic(status, \
{ SVC_GARBAGE, "SVC_GARBAGE" }, \
{ SVC_SYSERR, "SVC_SYSERR" }, \
@@ -1743,7 +1743,10 @@ TRACE_DEFINE_ENUM(SVC_COMPLETE);
__entry->xid, __get_sockaddr(server), __get_sockaddr(client)
TRACE_EVENT_CONDITION(svc_authenticate,
- TP_PROTO(const struct svc_rqst *rqst, int auth_res),
+ TP_PROTO(
+ const struct svc_rqst *rqst,
+ enum svc_auth_status auth_res
+ ),
TP_ARGS(rqst, auth_res),
@@ -1766,7 +1769,7 @@ TRACE_EVENT_CONDITION(svc_authenticate,
TP_printk(SVC_RQST_ENDPOINT_FORMAT
" auth_res=%s auth_stat=%s",
SVC_RQST_ENDPOINT_VARARGS,
- svc_show_status(__entry->svc_status),
+ show_svc_auth_status(__entry->svc_status),
rpc_show_auth_stat(__entry->auth_stat))
);
@@ -1918,25 +1921,42 @@ TRACE_EVENT(svc_stats_latency,
__get_str(procedure), __entry->execute)
);
+/*
+ * from include/linux/sunrpc/svc_xprt.h
+ */
+#define SVC_XPRT_FLAG_LIST \
+ svc_xprt_flag(BUSY) \
+ svc_xprt_flag(CONN) \
+ svc_xprt_flag(CLOSE) \
+ svc_xprt_flag(DATA) \
+ svc_xprt_flag(TEMP) \
+ svc_xprt_flag(DEAD) \
+ svc_xprt_flag(CHNGBUF) \
+ svc_xprt_flag(DEFERRED) \
+ svc_xprt_flag(OLD) \
+ svc_xprt_flag(LISTENER) \
+ svc_xprt_flag(CACHE_AUTH) \
+ svc_xprt_flag(LOCAL) \
+ svc_xprt_flag(KILL_TEMP) \
+ svc_xprt_flag(CONG_CTRL) \
+ svc_xprt_flag(HANDSHAKE) \
+ svc_xprt_flag(TLS_SESSION) \
+ svc_xprt_flag_end(PEER_AUTH)
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) TRACE_DEFINE_ENUM(XPT_##x);
+#define svc_xprt_flag_end(x) TRACE_DEFINE_ENUM(XPT_##x);
+
+SVC_XPRT_FLAG_LIST
+
+#undef svc_xprt_flag
+#undef svc_xprt_flag_end
+#define svc_xprt_flag(x) { BIT(XPT_##x), #x },
+#define svc_xprt_flag_end(x) { BIT(XPT_##x), #x }
+
#define show_svc_xprt_flags(flags) \
- __print_flags(flags, "|", \
- { BIT(XPT_BUSY), "BUSY" }, \
- { BIT(XPT_CONN), "CONN" }, \
- { BIT(XPT_CLOSE), "CLOSE" }, \
- { BIT(XPT_DATA), "DATA" }, \
- { BIT(XPT_TEMP), "TEMP" }, \
- { BIT(XPT_DEAD), "DEAD" }, \
- { BIT(XPT_CHNGBUF), "CHNGBUF" }, \
- { BIT(XPT_DEFERRED), "DEFERRED" }, \
- { BIT(XPT_OLD), "OLD" }, \
- { BIT(XPT_LISTENER), "LISTENER" }, \
- { BIT(XPT_CACHE_AUTH), "CACHE_AUTH" }, \
- { BIT(XPT_LOCAL), "LOCAL" }, \
- { BIT(XPT_KILL_TEMP), "KILL_TEMP" }, \
- { BIT(XPT_CONG_CTRL), "CONG_CTRL" }, \
- { BIT(XPT_HANDSHAKE), "HANDSHAKE" }, \
- { BIT(XPT_TLS_SESSION), "TLS_SESSION" }, \
- { BIT(XPT_PEER_AUTH), "PEER_AUTH" })
+ __print_flags(flags, "|", SVC_XPRT_FLAG_LIST)
TRACE_EVENT(svc_xprt_create_err,
TP_PROTO(
@@ -1994,25 +2014,25 @@ TRACE_EVENT(svc_xprt_create_err,
TRACE_EVENT(svc_xprt_enqueue,
TP_PROTO(
const struct svc_xprt *xprt,
- const struct svc_rqst *rqst
+ unsigned long flags
),
- TP_ARGS(xprt, rqst),
+ TP_ARGS(xprt, flags),
TP_STRUCT__entry(
SVC_XPRT_ENDPOINT_FIELDS(xprt)
-
- __field(int, pid)
),
TP_fast_assign(
- SVC_XPRT_ENDPOINT_ASSIGNMENTS(xprt);
-
- __entry->pid = rqst? rqst->rq_task->pid : 0;
+ __assign_sockaddr(server, &xprt->xpt_local,
+ xprt->xpt_locallen);
+ __assign_sockaddr(client, &xprt->xpt_remote,
+ xprt->xpt_remotelen);
+ __entry->flags = flags;
+ __entry->netns_ino = xprt->xpt_net->ns.inum;
),
- TP_printk(SVC_XPRT_ENDPOINT_FORMAT " pid=%d",
- SVC_XPRT_ENDPOINT_VARARGS, __entry->pid)
+ TP_printk(SVC_XPRT_ENDPOINT_FORMAT, SVC_XPRT_ENDPOINT_VARARGS)
);
TRACE_EVENT(svc_xprt_dequeue,
diff --git a/include/trace/events/task.h b/include/trace/events/task.h
index 64d160930b0d..47b527464d1a 100644
--- a/include/trace/events/task.h
+++ b/include/trace/events/task.h
@@ -47,7 +47,7 @@ TRACE_EVENT(task_rename,
TP_fast_assign(
__entry->pid = task->pid;
memcpy(entry->oldcomm, task->comm, TASK_COMM_LEN);
- strlcpy(entry->newcomm, comm, TASK_COMM_LEN);
+ strscpy(entry->newcomm, comm, TASK_COMM_LEN);
__entry->oom_score_adj = task->signal->oom_score_adj;
),
diff --git a/include/trace/events/thp.h b/include/trace/events/thp.h
index 202b3e3e67ff..f50048af5fcc 100644
--- a/include/trace/events/thp.h
+++ b/include/trace/events/thp.h
@@ -8,25 +8,34 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
-TRACE_EVENT(hugepage_set_pmd,
+DECLARE_EVENT_CLASS(hugepage_set,
- TP_PROTO(unsigned long addr, unsigned long pmd),
- TP_ARGS(addr, pmd),
+ TP_PROTO(unsigned long addr, unsigned long pte),
+ TP_ARGS(addr, pte),
TP_STRUCT__entry(
__field(unsigned long, addr)
- __field(unsigned long, pmd)
+ __field(unsigned long, pte)
),
TP_fast_assign(
__entry->addr = addr;
- __entry->pmd = pmd;
+ __entry->pte = pte;
),
- TP_printk("Set pmd with 0x%lx with 0x%lx", __entry->addr, __entry->pmd)
+ TP_printk("Set page table entry with 0x%lx with 0x%lx", __entry->addr, __entry->pte)
);
+DEFINE_EVENT(hugepage_set, hugepage_set_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd),
+ TP_ARGS(addr, pmd)
+);
-TRACE_EVENT(hugepage_update,
+DEFINE_EVENT(hugepage_set, hugepage_set_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud),
+ TP_ARGS(addr, pud)
+);
+
+DECLARE_EVENT_CLASS(hugepage_update,
TP_PROTO(unsigned long addr, unsigned long pte, unsigned long clr, unsigned long set),
TP_ARGS(addr, pte, clr, set),
@@ -48,6 +57,16 @@ TRACE_EVENT(hugepage_update,
TP_printk("hugepage update at addr 0x%lx and pte = 0x%lx clr = 0x%lx, set = 0x%lx", __entry->addr, __entry->pte, __entry->clr, __entry->set)
);
+DEFINE_EVENT(hugepage_update, hugepage_update_pmd,
+ TP_PROTO(unsigned long addr, unsigned long pmd, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pmd, clr, set)
+);
+
+DEFINE_EVENT(hugepage_update, hugepage_update_pud,
+ TP_PROTO(unsigned long addr, unsigned long pud, unsigned long clr, unsigned long set),
+ TP_ARGS(addr, pud, clr, set)
+);
+
DECLARE_EVENT_CLASS(migration_pmd,
TP_PROTO(unsigned long addr, unsigned long pmd),
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index 9c66e59d859c..4661f0d27062 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,7 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
@@ -68,7 +68,7 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -105,7 +105,7 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
@@ -141,7 +141,7 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strlcpy(__entry->name, bdi_dev_name(bdi),
+ strscpy(__entry->name, bdi_dev_name(bdi),
ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 44a3f565264d..0577f0cdd231 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -6,26 +6,26 @@
#define _TRACE_XEN_H
#include <linux/tracepoint.h>
-#include <asm/paravirt_types.h>
+#include <asm/xen/hypervisor.h>
#include <asm/xen/trace_types.h>
struct multicall_entry;
/* Multicalls */
DECLARE_EVENT_CLASS(xen_mc__batch,
- TP_PROTO(enum paravirt_lazy_mode mode),
+ TP_PROTO(enum xen_lazy_mode mode),
TP_ARGS(mode),
TP_STRUCT__entry(
- __field(enum paravirt_lazy_mode, mode)
+ __field(enum xen_lazy_mode, mode)
),
TP_fast_assign(__entry->mode = mode),
TP_printk("start batch LAZY_%s",
- (__entry->mode == PARAVIRT_LAZY_MMU) ? "MMU" :
- (__entry->mode == PARAVIRT_LAZY_CPU) ? "CPU" : "NONE")
+ (__entry->mode == XEN_LAZY_MMU) ? "MMU" :
+ (__entry->mode == XEN_LAZY_CPU) ? "CPU" : "NONE")
);
#define DEFINE_XEN_MC_BATCH(name) \
DEFINE_EVENT(xen_mc__batch, name, \
- TP_PROTO(enum paravirt_lazy_mode mode), \
+ TP_PROTO(enum xen_lazy_mode mode), \
TP_ARGS(mode))
DEFINE_XEN_MC_BATCH(xen_mc_batch);
diff --git a/include/uapi/asm-generic/siginfo.h b/include/uapi/asm-generic/siginfo.h
index ffbe4cec9f32..0f52d0ac47c5 100644
--- a/include/uapi/asm-generic/siginfo.h
+++ b/include/uapi/asm-generic/siginfo.h
@@ -242,7 +242,8 @@ typedef struct siginfo {
#define SEGV_ADIPERR 7 /* Precise MCD exception */
#define SEGV_MTEAERR 8 /* Asynchronous ARM MTE error */
#define SEGV_MTESERR 9 /* Synchronous ARM MTE exception */
-#define NSIGSEGV 9
+#define SEGV_CPERR 10 /* Control protection fault */
+#define NSIGSEGV 10
/*
* SIGBUS si_codes
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 79b14828d542..f477eda6a2b8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -94,6 +94,9 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
+ *
+ * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
+ * signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -101,12 +104,14 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index a87bbbbca2d4..794c1d857677 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -673,8 +673,11 @@ struct drm_gem_open {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
@@ -682,6 +685,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@@ -689,6 +694,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@@ -756,15 +763,14 @@ struct drm_gem_open {
/**
* DRM_CAP_SYNCOBJ
*
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
@@ -909,6 +915,27 @@ struct drm_syncobj_timeline_wait {
__u32 pad;
};
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to sent events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. The eventfd counter will
+ * be incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
+};
+
struct drm_syncobj_array {
__u64 handles;
@@ -1169,6 +1196,8 @@ extern "C" {
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
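A hedged userspace sketch of the new ioctl, using libdrm's drmIoctl(); the DRM FD, syncobj handle and eventfd are assumed to have been created elsewhere:

#include <stdint.h>
#include <xf86drm.h>	/* drmIoctl(); the updated drm.h provides the ioctl */

static int syncobj_notify_eventfd(int drm_fd, uint32_t handle,
				  uint64_t point, int event_fd)
{
	struct drm_syncobj_eventfd args = {
		.handle = handle,
		.flags  = 0,		/* 0: wait for the point to be signalled */
		.point  = point,	/* 0 for binary syncobjs */
		.fd     = event_fd,
	};

	/* On completion the eventfd counter is incremented by one. */
	return drmIoctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args);
}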
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -1180,25 +1209,50 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/*
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total number of payload bytes (including header).
*
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
#define DRM_EVENT_CRTC_SEQUENCE 0x03
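Because a read() on the DRM FD only ever returns whole events, userspace typically walks the returned buffer using the length field from the header; a hedged sketch (drm_fd assumed open, payload handling elided):

char buf[4096];
ssize_t n = read(drm_fd, buf, sizeof(buf));

for (size_t off = 0; n > 0 && off + sizeof(struct drm_event) <= (size_t)n; ) {
	const struct drm_event *e = (const struct drm_event *)(buf + off);

	switch (e->type) {
	case DRM_EVENT_VBLANK:
	case DRM_EVENT_FLIP_COMPLETE:
		/* payload is a struct drm_event_vblank */
		break;
	case DRM_EVENT_CRTC_SEQUENCE:
		/* payload is a struct drm_event_crtc_sequence */
		break;
	}
	off += e->length;	/* length includes the header */
}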
struct drm_event_vblank {
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 43691058d28f..ea1b639bcb28 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -488,6 +488,9 @@ struct drm_mode_get_connector {
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
+ *
+ * The (type, type_id) combination is not a stable identifier: the
+ * type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
@@ -883,7 +886,7 @@ struct hdr_metadata_infoframe {
*/
struct {
__u16 x, y;
- } display_primaries[3];
+ } display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
@@ -894,7 +897,7 @@ struct hdr_metadata_infoframe {
*/
struct {
__u16 x, y;
- } white_point;
+ } white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 839820aed87e..a58a14c9f222 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -60,6 +60,7 @@ extern "C" {
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
+#define DRM_IVPU_PARAM_CAPABILITIES 13
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
@@ -68,6 +69,9 @@ extern "C" {
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_CAP_METRIC_STREAMER 1
+#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
+
/**
* struct drm_ivpu_param - Get/Set VPU parameters
*/
@@ -129,8 +133,10 @@ struct drm_ivpu_param {
__u64 value;
};
-#define DRM_IVPU_BO_HIGH_MEM 0x00000001
+#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
+#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
+#define DRM_IVPU_BO_DMA_MEM 0x00000004
#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@@ -140,6 +146,7 @@ struct drm_ivpu_param {
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)
/**
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 853a327433d3..8d7402c13e56 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -33,11 +33,51 @@
extern "C" {
#endif
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_nouveau_channel_alloc {
+ __u32 fb_ctxdma_handle;
+ __u32 tt_ctxdma_handle;
+
+ __s32 channel;
+ __u32 pushbuf_domains;
+
+ /* Notifier memory */
+ __u32 notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ __u32 handle;
+ __u32 grclass;
+ } subchan[8];
+ __u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ __s32 channel;
+};
+
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@@ -98,6 +138,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@@ -126,16 +167,233 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
-#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as a synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+ /**
+ * @flags: the flags for a sync object
+ *
+ * The first 8 bits are used to determine the type of the sync object.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+ /**
+ * @handle: the handle of the sync object
+ */
+ __u32 handle;
+ /**
+ * @timeline_value:
+ *
+ * The timeline point of the sync object in case the syncobj is of
+ * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+ /**
+ * @kernel_managed_addr: start address of the kernel managed VA space
+ * region
+ */
+ __u64 kernel_managed_addr;
+ /**
+ * @kernel_managed_size: size of the kernel managed VA space region in
+ * bytes
+ */
+ __u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+ /**
+ * @op: the operation type
+ */
+ __u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the region the mapping
+ * is located in is a sparse region, new sparse mappings are created where the
+ * unmapped (memory backed) mapping was mapped previously. To remove a sparse
+ * region, the &DRM_NOUVEAU_VM_BIND_SPARSE flag must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind_op
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+ /**
+ * @handle: the handle of the DRM GEM object to map
+ */
+ __u32 handle;
+ /**
+ * @pad: 32 bit padding, should be 0
+ */
+ __u32 pad;
+ /**
+ * @addr:
+ *
+ * the address the VA space region or (memory backed) mapping should be mapped to
+ */
+ __u64 addr;
+ /**
+ * @bo_offset: the offset within the BO backing the mapping
+ */
+ __u64 bo_offset;
+ /**
+ * @range: the size of the requested mapping in bytes
+ */
+ __u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+ /**
+ * @op_count: the number of &drm_nouveau_vm_bind_op
+ */
+ __u32 op_count;
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+ */
+ __u64 op_ptr;
+};
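A hedged sketch of a single, synchronous map through this interface; drm_fd, bo_handle, gpu_va and size are assumed to come from earlier GEM_NEW/VM_INIT steps, and no sync objects are attached because the synchronous path does not accept them:

struct drm_nouveau_vm_bind_op op = {
	.op        = DRM_NOUVEAU_VM_BIND_OP_MAP,
	.handle    = bo_handle,
	.addr      = gpu_va,
	.bo_offset = 0,
	.range     = size,
};
struct drm_nouveau_vm_bind bind = {
	.op_count = 1,
	.flags    = 0,	/* or DRM_NOUVEAU_VM_BIND_RUN_ASYNC plus sync objects */
	.op_ptr   = (uintptr_t)&op,
};

if (drmIoctl(drm_fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind))
	perror("DRM_IOCTL_NOUVEAU_VM_BIND");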
+
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+ /**
+ * @va: the virtual address of the push buffer mapping
+ */
+ __u64 va;
+ /**
+ * @va_len: the length of the push buffer mapping
+ */
+ __u32 va_len;
+ /**
+ * @flags: the flags for this push buffer mapping
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+ /**
+ * @channel: the channel to execute the push buffer in
+ */
+ __u32 channel;
+ /**
+ * @push_count: the number of &drm_nouveau_exec_push ops
+ */
+ __u32 push_count;
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @push_ptr: pointer to &drm_nouveau_exec_push ops
+ */
+ __u64 push_ptr;
+};
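And a hedged sketch of submitting one push buffer on a channel, signalling a binary syncobj when it finishes; the handles, channel and push buffer VA are assumptions:

struct drm_nouveau_sync sig = {
	.flags  = DRM_NOUVEAU_SYNC_SYNCOBJ,
	.handle = syncobj_handle,
};
struct drm_nouveau_exec_push push = {
	.va     = pushbuf_va,	/* VA of a mapping created via VM_BIND */
	.va_len = pushbuf_len,
};
struct drm_nouveau_exec exec = {
	.channel    = channel,
	.push_count = 1,
	.sig_count  = 1,
	.sig_ptr    = (uintptr_t)&sig,
	.push_ptr   = (uintptr_t)&push,
};

if (drmIoctl(drm_fd, DRM_IOCTL_NOUVEAU_EXEC, &exec))
	perror("DRM_IOCTL_NOUVEAU_EXEC");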
+
+#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
+#define DRM_NOUVEAU_VM_INIT 0x10
+#define DRM_NOUVEAU_VM_BIND 0x11
+#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -188,6 +446,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@@ -197,6 +459,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 7b158fcb02b4..b1d0e56565bc 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -64,6 +64,16 @@ struct drm_virtgpu_map {
__u32 pad;
};
+#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
+#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
+ VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
+ 0)
+struct drm_virtgpu_execbuffer_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
@@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
- __u32 pad;
+ __u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
+ __u32 num_in_syncobjs;
+ __u32 num_out_syncobjs;
+ __u64 in_syncobjs;
+ __u64 out_syncobjs;
};
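A hedged sketch of attaching one in-syncobj and one out-syncobj to a submission; the command-stream and BO fields that existed before are elided, and the handles are assumptions:

struct drm_virtgpu_execbuffer_syncobj in = {
	.handle = in_syncobj,
	.flags  = VIRTGPU_EXECBUF_SYNCOBJ_RESET, /* reset this syncobj as part of the submit (assumption from the flag name) */
	.point  = 0,
};
struct drm_virtgpu_execbuffer_syncobj out = {
	.handle = out_syncobj,
};
struct drm_virtgpu_execbuffer eb = {
	/* ...flags, command buffer and bo_handles as before... */
	.syncobj_stride   = sizeof(struct drm_virtgpu_execbuffer_syncobj),
	.num_in_syncobjs  = 1,
	.in_syncobjs      = (uintptr_t)&in,
	.num_out_syncobjs = 1,
	.out_syncobjs     = (uintptr_t)&out,
};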
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 8790b3962e4b..0448700890f7 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1962,7 +1962,9 @@ union bpf_attr {
* performed again, if the helper is used in combination with
* direct packet access.
* Return
- * 0 on success, or a negative error in case of failure.
+ * 0 on success, or a negative error in case of failure. A positive
+ * error indicates a potential drop or congestion in the target
+ * device. The particular positive error codes are not defined.
*
* u64 bpf_get_current_pid_tgid(void)
* Description
diff --git a/include/uapi/linux/cgroupstats.h b/include/uapi/linux/cgroupstats.h
index aa306e4cd6c1..80b2c8594480 100644
--- a/include/uapi/linux/cgroupstats.h
+++ b/include/uapi/linux/cgroupstats.h
@@ -24,8 +24,6 @@
* basis. This data is shared using taskstats.
*
* Most of these states are derived by looking at the task->state value
- * For the nr_io_wait state, a flag in the delay accounting structure
- * indicates that the task is waiting on IO
*
* Each member is aligned to a 8 byte boundary.
*/
diff --git a/include/uapi/linux/dlm_plock.h b/include/uapi/linux/dlm_plock.h
index 63b6c1fd9169..eb66afcac40e 100644
--- a/include/uapi/linux/dlm_plock.h
+++ b/include/uapi/linux/dlm_plock.h
@@ -22,6 +22,7 @@ enum {
DLM_PLOCK_OP_LOCK = 1,
DLM_PLOCK_OP_UNLOCK,
DLM_PLOCK_OP_GET,
+ DLM_PLOCK_OP_CANCEL,
};
#define DLM_PLOCK_FL_CLOSE 1
diff --git a/include/uapi/linux/elf-fdpic.h b/include/uapi/linux/elf-fdpic.h
index 4fcc6cfebe18..ec23f0871129 100644
--- a/include/uapi/linux/elf-fdpic.h
+++ b/include/uapi/linux/elf-fdpic.h
@@ -32,4 +32,19 @@ struct elf32_fdpic_loadmap {
#define ELF32_FDPIC_LOADMAP_VERSION 0x0000
+/* segment mappings for ELF FDPIC libraries/executables/interpreters */
+struct elf64_fdpic_loadseg {
+ Elf64_Addr addr; /* core address to which mapped */
+ Elf64_Addr p_vaddr; /* VMA recorded in file */
+ Elf64_Word p_memsz; /* allocation size recorded in file */
+};
+
+struct elf64_fdpic_loadmap {
+ Elf64_Half version; /* version of these structures, just in case... */
+ Elf64_Half nsegs; /* number of segments */
+ struct elf64_fdpic_loadseg segs[];
+};
+
+#define ELF64_FDPIC_LOADMAP_VERSION 0x0000
+
#endif /* _UAPI_LINUX_ELF_FDPIC_H */
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index e0e159138331..9b731976ce2f 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -409,6 +409,8 @@ typedef struct elf64_shdr {
#define NT_386_TLS 0x200 /* i386 TLS slots (struct user_desc) */
#define NT_386_IOPERM 0x201 /* x86 io permission bitmap (1=deny) */
#define NT_X86_XSTATE 0x202 /* x86 extended state using xsave */
+/* Old binutils treats 0x203 as a CET state */
+#define NT_X86_SHSTK 0x204 /* x86 SHSTK state */
#define NT_S390_HIGH_GPRS 0x300 /* s390 upper register halves */
#define NT_S390_TIMER 0x301 /* s390 timer register */
#define NT_S390_TODCMP 0x302 /* s390 TOD clock comparator register */
@@ -443,6 +445,8 @@ typedef struct elf64_shdr {
#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
#define NT_MIPS_MSA 0x802 /* MIPS SIMD registers */
+#define NT_RISCV_CSR 0x900 /* RISC-V Control and Status Registers */
+#define NT_RISCV_VECTOR 0x901 /* RISC-V vector registers */
#define NT_LOONGARCH_CPUCFG 0xa00 /* LoongArch CPU config registers */
#define NT_LOONGARCH_CSR 0xa01 /* LoongArch control and status registers */
#define NT_LOONGARCH_LSX 0xa02 /* LoongArch Loongson SIMD Extension registers */
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
index b2f1977378c7..a2e730fc6309 100644
--- a/include/uapi/linux/fsi.h
+++ b/include/uapi/linux/fsi.h
@@ -60,6 +60,16 @@ struct scom_access {
*/
/**
+ * FSI_SBEFIFO_CMD_TIMEOUT sets the timeout for writing data to the SBEFIFO.
+ *
+ * The command timeout is specified in seconds. The minimum value of the
+ * command timeout is 1 second (the default) and the maximum value is
+ * 120 seconds. A command timeout of 0 will reset the value to the default
+ * of 1 second.
+ */
+#define FSI_SBEFIFO_CMD_TIMEOUT_SECONDS _IOW('s', 0x01, __u32)
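A hedged sketch of raising the write-side timeout from userspace; sbefifo_fd is assumed to be an open SBEFIFO character device:

__u32 cmd_timeout = 30;	/* seconds, 1..120; 0 restores the 1 second default */

if (ioctl(sbefifo_fd, FSI_SBEFIFO_CMD_TIMEOUT_SECONDS, &cmd_timeout))
	perror("FSI_SBEFIFO_CMD_TIMEOUT_SECONDS");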
+
+/**
* FSI_SBEFIFO_READ_TIMEOUT sets the read timeout for response from SBE.
*
* The read timeout is specified in seconds. The minimum value of read
diff --git a/include/uapi/linux/fuse.h b/include/uapi/linux/fuse.h
index b3fcab13fcd3..db92a7202b34 100644
--- a/include/uapi/linux/fuse.h
+++ b/include/uapi/linux/fuse.h
@@ -207,6 +207,10 @@
* - add FUSE_EXT_GROUPS
* - add FUSE_CREATE_SUPP_GROUP
* - add FUSE_HAS_EXPIRE_ONLY
+ *
+ * 7.39
+ * - add FUSE_DIRECT_IO_RELAX
+ * - add FUSE_STATX and related structures
*/
#ifndef _LINUX_FUSE_H
@@ -242,7 +246,7 @@
#define FUSE_KERNEL_VERSION 7
/** Minor version number of this interface */
-#define FUSE_KERNEL_MINOR_VERSION 38
+#define FUSE_KERNEL_MINOR_VERSION 39
/** The node ID of the root inode */
#define FUSE_ROOT_ID 1
@@ -269,6 +273,40 @@ struct fuse_attr {
uint32_t flags;
};
+/*
+ * The following structures are bit-for-bit compatible with the statx(2) ABI in
+ * Linux.
+ */
+struct fuse_sx_time {
+ int64_t tv_sec;
+ uint32_t tv_nsec;
+ int32_t __reserved;
+};
+
+struct fuse_statx {
+ uint32_t mask;
+ uint32_t blksize;
+ uint64_t attributes;
+ uint32_t nlink;
+ uint32_t uid;
+ uint32_t gid;
+ uint16_t mode;
+ uint16_t __spare0[1];
+ uint64_t ino;
+ uint64_t size;
+ uint64_t blocks;
+ uint64_t attributes_mask;
+ struct fuse_sx_time atime;
+ struct fuse_sx_time btime;
+ struct fuse_sx_time ctime;
+ struct fuse_sx_time mtime;
+ uint32_t rdev_major;
+ uint32_t rdev_minor;
+ uint32_t dev_major;
+ uint32_t dev_minor;
+ uint64_t __spare2[14];
+};
+
struct fuse_kstatfs {
uint64_t blocks;
uint64_t bfree;
@@ -371,6 +409,8 @@ struct fuse_file_lock {
* FUSE_CREATE_SUPP_GROUP: add supplementary group info to create, mkdir,
* symlink and mknod (single group that matches parent)
* FUSE_HAS_EXPIRE_ONLY: kernel supports expiry-only entry invalidation
+ * FUSE_DIRECT_IO_RELAX: relax restrictions in FOPEN_DIRECT_IO mode, for now
+ * allow shared mmap
*/
#define FUSE_ASYNC_READ (1 << 0)
#define FUSE_POSIX_LOCKS (1 << 1)
@@ -409,6 +449,7 @@ struct fuse_file_lock {
#define FUSE_HAS_INODE_DAX (1ULL << 33)
#define FUSE_CREATE_SUPP_GROUP (1ULL << 34)
#define FUSE_HAS_EXPIRE_ONLY (1ULL << 35)
+#define FUSE_DIRECT_IO_RELAX (1ULL << 36)
/**
* CUSE INIT request/reply flags
@@ -575,6 +616,7 @@ enum fuse_opcode {
FUSE_REMOVEMAPPING = 49,
FUSE_SYNCFS = 50,
FUSE_TMPFILE = 51,
+ FUSE_STATX = 52,
/* CUSE specific operations */
CUSE_INIT = 4096,
@@ -639,6 +681,22 @@ struct fuse_attr_out {
struct fuse_attr attr;
};
+struct fuse_statx_in {
+ uint32_t getattr_flags;
+ uint32_t reserved;
+ uint64_t fh;
+ uint32_t sx_flags;
+ uint32_t sx_mask;
+};
+
+struct fuse_statx_out {
+ uint64_t attr_valid; /* Cache timeout for the attributes */
+ uint32_t attr_valid_nsec;
+ uint32_t flags;
+ uint64_t spare[2];
+ struct fuse_statx stat;
+};
+
#define FUSE_COMPAT_MKNOD_IN_SIZE 8
struct fuse_mknod_in {
diff --git a/include/uapi/linux/gsmmux.h b/include/uapi/linux/gsmmux.h
index eb67884e5f38..4c878d84dbda 100644
--- a/include/uapi/linux/gsmmux.h
+++ b/include/uapi/linux/gsmmux.h
@@ -2,10 +2,45 @@
#ifndef _LINUX_GSMMUX_H
#define _LINUX_GSMMUX_H
+#include <linux/const.h>
#include <linux/if.h>
#include <linux/ioctl.h>
#include <linux/types.h>
+/*
+ * flags definition for n_gsm
+ *
+ * Used by:
+ * struct gsm_config_ext.flags
+ * struct gsm_dlci_config.flags
+ */
+/* Forces a DLCI reset if set. Otherwise, a DLCI reset is only done if
+ * incompatible settings were provided. Always cleared on retrieval.
+ */
+#define GSM_FL_RESTART _BITUL(0)
+
+/**
+ * struct gsm_config - n_gsm basic configuration parameters
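The list macro is expanded twice with different definitions of svc_xprt_flag()/svc_xprt_flag_end(): once to emit TRACE_DEFINE_ENUM() entries and once to build the __print_flags() table, so the flag names live in a single place. The same idiom in a hedged, stand-alone form with made-up names:

#define COLOR_LIST	\
	color(RED)	\
	color(GREEN)	\
	color_end(BLUE)

#undef color
#undef color_end
#define color(x)	x,
#define color_end(x)	x
enum color { COLOR_LIST };			/* first expansion: enumerators */

#undef color
#undef color_end
#define color(x)	#x ", "
#define color_end(x)	#x
static const char color_names[] = COLOR_LIST;	/* "RED, GREEN, BLUE" */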
+ *
+ * This structure is used in combination with GSMIOC_GETCONF and GSMIOC_SETCONF
+ * to retrieve and set the basic parameters of an n_gsm ldisc.
+ * struct gsm_config_ext can be used to configure extended ldisc parameters.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @adaption: Convergence layer type
+ * @encapsulation: Framing (0 = basic option, 1 = advanced option)
+ * @initiator: Initiator or responder
+ * @t1: Acknowledgment timer
+ * @t2: Response timer for multiplexer control channel
+ * @t3: Response timer for wake-up procedure
+ * @n2: Maximum number of retransmissions
+ * @mru: Maximum incoming frame payload size
+ * @mtu: Maximum outgoing frame payload size
+ * @k: Window size
+ * @i: Frame type (1 = UIH, 2 = UI)
+ * @unused: Can not be used
+ */
struct gsm_config
{
unsigned int adaption;
@@ -19,18 +54,32 @@ struct gsm_config
unsigned int mtu;
unsigned int k;
unsigned int i;
- unsigned int unused[8]; /* Can not be used */
+ unsigned int unused[8];
};
#define GSMIOC_GETCONF _IOR('G', 0, struct gsm_config)
#define GSMIOC_SETCONF _IOW('G', 1, struct gsm_config)
+/**
+ * struct gsm_netconfig - n_gsm network configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_ENABLE_NET and
+ * GSMIOC_DISABLE_NET to enable or disable a network data connection
+ * over a mux virtual tty channel. This is for modems that support
+ * data connections with raw IP frames instead of PPP.
+ *
+ * @adaption: Adaption to use in network mode.
+ * @protocol: Protocol to use - only ETH_P_IP supported.
+ * @unused2: Can not be used.
+ * @if_name: Interface name format string.
+ * @unused: Can not be used.
+ */
struct gsm_netconfig {
- unsigned int adaption; /* Adaption to use in network mode */
- unsigned short protocol;/* Protocol to use - only ETH_P_IP supported */
- unsigned short unused2; /* Can not be used */
- char if_name[IFNAMSIZ]; /* interface name format string */
- __u8 unused[28]; /* Can not be used */
+ unsigned int adaption;
+ unsigned short protocol;
+ unsigned short unused2;
+ char if_name[IFNAMSIZ];
+ __u8 unused[28];
};
#define GSMIOC_ENABLE_NET _IOW('G', 2, struct gsm_netconfig)
@@ -39,26 +88,57 @@ struct gsm_netconfig {
/* get the base tty number for a configured gsmmux tty */
#define GSMIOC_GETFIRST _IOR('G', 4, __u32)
+/**
+ * struct gsm_config_ext - n_gsm extended configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_EXT and
+ * GSMIOC_SETCONF_EXT to retrieve and set the extended parameters of an
+ * n_gsm ldisc.
+ *
+ * All timers are in units of 1/100th of a second.
+ *
+ * @keep_alive: Control channel keep-alive in 1/100th of a second (0 to disable).
+ * @wait_config: Wait for DLCI config before opening virtual link?
+ * @flags: Mux specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
struct gsm_config_ext {
- __u32 keep_alive; /* Control channel keep-alive in 1/100th of a
- * second (0 to disable)
- */
- __u32 wait_config; /* Wait for DLCI config before opening virtual link? */
- __u32 reserved[6]; /* For future use, must be initialized to zero */
+ __u32 keep_alive;
+ __u32 wait_config;
+ __u32 flags;
+ __u32 reserved[5];
};
#define GSMIOC_GETCONF_EXT _IOR('G', 5, struct gsm_config_ext)
#define GSMIOC_SETCONF_EXT _IOW('G', 6, struct gsm_config_ext)
-/* Set channel accordingly before calling GSMIOC_GETCONF_DLCI. */
+/**
+ * struct gsm_dlci_config - n_gsm channel configuration parameters
+ *
+ * This structure is used in combination with GSMIOC_GETCONF_DLCI and
+ * GSMIOC_SETCONF_DLCI to retrieve and set the channel specific parameters
+ * of an n_gsm ldisc.
+ *
+ * Set the channel accordingly before calling GSMIOC_GETCONF_DLCI.
+ *
+ * @channel: DLCI (0 for the associated DLCI).
+ * @adaption: Convergence layer type.
+ * @mtu: Maximum transfer unit.
+ * @priority: Priority (0 for default value).
+ * @i: Frame type (1 = UIH, 2 = UI).
+ * @k: Window size (0 for default value).
+ * @flags: DLCI specific flags.
+ * @reserved: For future use, must be initialized to zero.
+ */
struct gsm_dlci_config {
- __u32 channel; /* DLCI (0 for the associated DLCI) */
- __u32 adaption; /* Convergence layer type */
- __u32 mtu; /* Maximum transfer unit */
- __u32 priority; /* Priority (0 for default value) */
- __u32 i; /* Frame type (1 = UIH, 2 = UI) */
- __u32 k; /* Window size (0 for default value) */
- __u32 reserved[8]; /* For future use, must be initialized to zero */
+ __u32 channel;
+ __u32 adaption;
+ __u32 mtu;
+ __u32 priority;
+ __u32 i;
+ __u32 k;
+ __u32 flags;
+ __u32 reserved[7];
};
#define GSMIOC_GETCONF_DLCI _IOWR('G', 7, struct gsm_dlci_config)
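A hedged sketch of per-channel configuration using the new flags field: set @channel before GSMIOC_GETCONF_DLCI as the comment requires, then write the result back with GSM_FL_RESTART. The companion GSMIOC_SETCONF_DLCI ioctl is assumed here; it is outside the context shown in this hunk.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/gsmmux.h>

/* Sketch: force a reset of DLCI 1 with otherwise unchanged parameters. */
static int gsm_restart_dlci(int tty_fd)
{
	struct gsm_dlci_config dc;

	memset(&dc, 0, sizeof(dc));
	dc.channel = 1;			/* select the DLCI to query */
	if (ioctl(tty_fd, GSMIOC_GETCONF_DLCI, &dc) < 0)
		return -1;

	dc.flags |= GSM_FL_RESTART;	/* force a DLCI reset on set */
	return ioctl(tty_fd, GSMIOC_SETCONF_DLCI, &dc);
}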
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index 08720c7bd92f..8e61f8b7c2ce 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -185,6 +185,11 @@ enum {
*/
#define IORING_SETUP_REGISTERED_FD_ONLY (1U << 15)
+/*
+ * Removes indirection through the SQ index array.
+ */
+#define IORING_SETUP_NO_SQARRAY (1U << 16)
+
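A minimal sketch, assuming liburing, of requesting a ring without the SQ index array; on kernels that do not know the flag, setup fails with -EINVAL and the caller can retry with the classic layout.

#include <errno.h>
#include <liburing.h>

/* Sketch: prefer IORING_SETUP_NO_SQARRAY, fall back if unsupported. */
static int setup_ring(struct io_uring *ring, unsigned entries)
{
	int ret = io_uring_queue_init(entries, ring, IORING_SETUP_NO_SQARRAY);

	if (ret == -EINVAL)	/* flag not supported by this kernel */
		ret = io_uring_queue_init(entries, ring, 0);
	return ret;
}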
enum io_uring_op {
IORING_OP_NOP,
IORING_OP_READV,
@@ -299,11 +304,15 @@ enum io_uring_op {
* request 'user_data'
* IORING_ASYNC_CANCEL_ANY Match any request
* IORING_ASYNC_CANCEL_FD_FIXED 'fd' passed in is a fixed descriptor
+ * IORING_ASYNC_CANCEL_USERDATA Match on user_data, default for no other key
+ * IORING_ASYNC_CANCEL_OP Match request based on opcode
*/
#define IORING_ASYNC_CANCEL_ALL (1U << 0)
#define IORING_ASYNC_CANCEL_FD (1U << 1)
#define IORING_ASYNC_CANCEL_ANY (1U << 2)
#define IORING_ASYNC_CANCEL_FD_FIXED (1U << 3)
+#define IORING_ASYNC_CANCEL_USERDATA (1U << 4)
+#define IORING_ASYNC_CANCEL_OP (1U << 5)
/*
* send/sendmsg and recv/recvmsg flags (sqe->ioprio)
@@ -697,7 +706,9 @@ struct io_uring_sync_cancel_reg {
__s32 fd;
__u32 flags;
struct __kernel_timespec timeout;
- __u64 pad[4];
+ __u8 opcode;
+ __u8 pad[7];
+ __u64 pad2[3];
};
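A sketch of how the new opcode field might be used for synchronous cancelation by opcode. It assumes liburing's io_uring_register_sync_cancel() helper as the wrapper around the sync-cancel registration interface; a raw io_uring_register() call would work the same way.

#include <string.h>
#include <liburing.h>

/* Sketch: synchronously cancel every pending IORING_OP_READ request. */
static int cancel_all_reads(struct io_uring *ring)
{
	struct io_uring_sync_cancel_reg reg;

	memset(&reg, 0, sizeof(reg));
	reg.flags = IORING_ASYNC_CANCEL_OP | IORING_ASYNC_CANCEL_ALL;
	reg.opcode = IORING_OP_READ;
	reg.timeout.tv_sec = -1;	/* -1/-1 is treated as "no timeout" */
	reg.timeout.tv_nsec = -1;

	return io_uring_register_sync_cancel(ring, &reg);
}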
/*
@@ -717,6 +728,14 @@ struct io_uring_recvmsg_out {
__u32 flags;
};
+/*
+ * Argument for IORING_OP_URING_CMD when file is a socket
+ */
+enum {
+ SOCKET_URING_OP_SIOCINQ = 0,
+ SOCKET_URING_OP_SIOCOUTQ,
+};
+
#ifdef __cplusplus
}
#endif
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 98ebba80cfa1..b4ba0c0cbab6 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -45,6 +45,8 @@ enum {
IOMMUFD_CMD_IOAS_UNMAP,
IOMMUFD_CMD_OPTION,
IOMMUFD_CMD_VFIO_IOAS,
+ IOMMUFD_CMD_HWPT_ALLOC,
+ IOMMUFD_CMD_GET_HW_INFO,
};
/**
@@ -344,4 +346,99 @@ struct iommu_vfio_ioas {
__u16 __reserved;
};
#define IOMMU_VFIO_IOAS _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VFIO_IOAS)
+
+/**
+ * struct iommu_hwpt_alloc - ioctl(IOMMU_HWPT_ALLOC)
+ * @size: sizeof(struct iommu_hwpt_alloc)
+ * @flags: Must be 0
+ * @dev_id: The device to allocate this HWPT for
+ * @pt_id: The IOAS to connect this HWPT to
+ * @out_hwpt_id: The ID of the new HWPT
+ * @__reserved: Must be 0
+ *
+ * Explicitly allocate a hardware page table object. This is the same object
+ * type that is returned by iommufd_device_attach() and represents the
+ * underlying iommu driver's iommu_domain kernel object.
+ *
+ * A HWPT will be created with the IOVA mappings from the given IOAS.
+ */
+struct iommu_hwpt_alloc {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 pt_id;
+ __u32 out_hwpt_id;
+ __u32 __reserved;
+};
+#define IOMMU_HWPT_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_HWPT_ALLOC)
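A minimal sketch of the allocation call, assuming an iommufd obtained from /dev/iommu, an IOAS id from a prior IOMMU_IOAS_ALLOC, and a device id from the cdev bind shown later in this patch; error handling is trimmed.

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Sketch: explicitly allocate a HWPT for a bound device on top of an IOAS. */
static int hwpt_alloc(int iommufd, __u32 dev_id, __u32 ioas_id, __u32 *hwpt_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.pt_id = ioas_id,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd) < 0)
		return -1;

	*hwpt_id = cmd.out_hwpt_id;
	return 0;
}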
+
+/**
+ * struct iommu_hw_info_vtd - Intel VT-d hardware information
+ *
+ * @flags: Must be 0
+ * @__reserved: Must be 0
+ *
+ * @cap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.2 Capability Register.
+ * @ecap_reg: Value of Intel VT-d capability register defined in VT-d spec
+ * section 11.4.3 Extended Capability Register.
+ *
+ * User needs to understand the Intel VT-d specification to decode the
+ * register value.
+ */
+struct iommu_hw_info_vtd {
+ __u32 flags;
+ __u32 __reserved;
+ __aligned_u64 cap_reg;
+ __aligned_u64 ecap_reg;
+};
+
+/**
+ * enum iommu_hw_info_type - IOMMU Hardware Info Types
+ * @IOMMU_HW_INFO_TYPE_NONE: Used by the drivers that do not report hardware
+ * info
+ * @IOMMU_HW_INFO_TYPE_INTEL_VTD: Intel VT-d iommu info type
+ */
+enum iommu_hw_info_type {
+ IOMMU_HW_INFO_TYPE_NONE,
+ IOMMU_HW_INFO_TYPE_INTEL_VTD,
+};
+
+/**
+ * struct iommu_hw_info - ioctl(IOMMU_GET_HW_INFO)
+ * @size: sizeof(struct iommu_hw_info)
+ * @flags: Must be 0
+ * @dev_id: The device bound to the iommufd
+ * @data_len: Input the length of a user buffer in bytes. Output the length of
+ * data that kernel supports
+ * @data_uptr: User pointer to a user-space buffer used by the kernel to fill
+ * the iommu type specific hardware information data
+ * @out_data_type: Output the iommu hardware info type as defined in the enum
+ * iommu_hw_info_type.
+ * @__reserved: Must be 0
+ *
+ * Query iommu type specific hardware information from the iommu behind a
+ * given device that has been bound to iommufd. This hardware info is used to
+ * sync capabilities between the virtual iommu and the physical iommu, e.g. a
+ * nested translation setup needs to check the hardware info so that a guest
+ * stage-1 page table can be compatible with the physical iommu.
+ *
+ * To capture the iommu type specific hardware information, @data_uptr and its
+ * length @data_len must be provided. Trailing bytes are zeroed if the user
+ * buffer is larger than the data the kernel has; otherwise the kernel fills
+ * only @data_len bytes of the buffer. If the ioctl succeeds, @data_len is
+ * updated to the length the kernel actually supports and @out_data_type is
+ * filled so the data in the buffer pointed to by @data_uptr can be decoded.
+ * An input @data_len of zero is allowed.
+ */
+struct iommu_hw_info {
+ __u32 size;
+ __u32 flags;
+ __u32 dev_id;
+ __u32 data_len;
+ __aligned_u64 data_uptr;
+ __u32 out_data_type;
+ __u32 __reserved;
+};
+#define IOMMU_GET_HW_INFO _IO(IOMMUFD_TYPE, IOMMUFD_CMD_GET_HW_INFO)
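A sketch of the two-step query described above: first call with @data_len of zero to learn the supported length, then fetch the type-specific data. Intel VT-d is used as the example type, assuming the kernel reports IOMMU_HW_INFO_TYPE_INTEL_VTD for the device.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Sketch: probe the length, then read the VT-d capability registers. */
static int get_vtd_info(int iommufd, __u32 dev_id, struct iommu_hw_info_vtd *vtd)
{
	struct iommu_hw_info cmd = {
		.size = sizeof(cmd),
		.dev_id = dev_id,
		.data_len = 0,			/* first pass: length query only */
	};

	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd) < 0)
		return -1;

	cmd.data_len = sizeof(*vtd);		/* second pass: fetch the data */
	cmd.data_uptr = (__u64)(uintptr_t)vtd;
	if (ioctl(iommufd, IOMMU_GET_HW_INFO, &cmd) < 0)
		return -1;

	return cmd.out_data_type == IOMMU_HW_INFO_TYPE_INTEL_VTD ? 0 : -1;
}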
#endif
diff --git a/include/uapi/linux/ioprio.h b/include/uapi/linux/ioprio.h
index 99440b2e8c35..bee2bdb0eedb 100644
--- a/include/uapi/linux/ioprio.h
+++ b/include/uapi/linux/ioprio.h
@@ -107,20 +107,21 @@ enum {
/*
* Return an I/O priority value based on a class, a level and a hint.
*/
-static __always_inline __u16 ioprio_value(int class, int level, int hint)
+static __always_inline __u16 ioprio_value(int prioclass, int priolevel,
+ int priohint)
{
- if (IOPRIO_BAD_VALUE(class, IOPRIO_NR_CLASSES) ||
- IOPRIO_BAD_VALUE(level, IOPRIO_NR_LEVELS) ||
- IOPRIO_BAD_VALUE(hint, IOPRIO_NR_HINTS))
+ if (IOPRIO_BAD_VALUE(prioclass, IOPRIO_NR_CLASSES) ||
+ IOPRIO_BAD_VALUE(priolevel, IOPRIO_NR_LEVELS) ||
+ IOPRIO_BAD_VALUE(priohint, IOPRIO_NR_HINTS))
return IOPRIO_CLASS_INVALID << IOPRIO_CLASS_SHIFT;
- return (class << IOPRIO_CLASS_SHIFT) |
- (hint << IOPRIO_HINT_SHIFT) | level;
+ return (prioclass << IOPRIO_CLASS_SHIFT) |
+ (priohint << IOPRIO_HINT_SHIFT) | priolevel;
}
-#define IOPRIO_PRIO_VALUE(class, level) \
- ioprio_value(class, level, IOPRIO_HINT_NONE)
-#define IOPRIO_PRIO_VALUE_HINT(class, level, hint) \
- ioprio_value(class, level, hint)
+#define IOPRIO_PRIO_VALUE(prioclass, priolevel) \
+ ioprio_value(prioclass, priolevel, IOPRIO_HINT_NONE)
+#define IOPRIO_PRIO_VALUE_HINT(prioclass, priolevel, priohint) \
+ ioprio_value(prioclass, priolevel, priohint)
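A small sketch of composing and applying a priority with the renamed helpers. IOPRIO_CLASS_BE, IOPRIO_WHO_PROCESS and the raw ioprio_set() syscall come from the existing UAPI/kernel interface and are assumed here; there is no glibc wrapper.

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/ioprio.h>

/* Sketch: set the calling process to best-effort class, level 4, no hint. */
static int set_be_prio(void)
{
	int prio = IOPRIO_PRIO_VALUE_HINT(IOPRIO_CLASS_BE, 4, IOPRIO_HINT_NONE);

	return syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0, prio);
}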
#endif /* _UAPI_LINUX_IOPRIO_H */
diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h
index 981016e05cfa..01766dd839b0 100644
--- a/include/uapi/linux/kexec.h
+++ b/include/uapi/linux/kexec.h
@@ -12,6 +12,7 @@
/* kexec flags for different usage scenarios */
#define KEXEC_ON_CRASH 0x00000001
#define KEXEC_PRESERVE_CONTEXT 0x00000002
+#define KEXEC_UPDATE_ELFCOREHDR 0x00000004
#define KEXEC_ARCH_MASK 0xffff0000
/*
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f089ab290978..13065dd96132 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1418,9 +1418,16 @@ struct kvm_device_attr {
__u64 addr; /* userspace address of attr data */
};
-#define KVM_DEV_VFIO_GROUP 1
-#define KVM_DEV_VFIO_GROUP_ADD 1
-#define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_FILE 1
+
+#define KVM_DEV_VFIO_FILE_ADD 1
+#define KVM_DEV_VFIO_FILE_DEL 2
+
+/* KVM_DEV_VFIO_GROUP aliases are for compile time uapi compatibility */
+#define KVM_DEV_VFIO_GROUP KVM_DEV_VFIO_FILE
+
+#define KVM_DEV_VFIO_GROUP_ADD KVM_DEV_VFIO_FILE_ADD
+#define KVM_DEV_VFIO_GROUP_DEL KVM_DEV_VFIO_FILE_DEL
#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
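A hedged sketch of the renamed attribute in use: create the KVM VFIO pseudo-device and register a VFIO file descriptor (a group or cdev fd) with it. KVM_CREATE_DEVICE, KVM_SET_DEVICE_ATTR and KVM_DEV_TYPE_VFIO come from the existing KVM UAPI and are assumed here; error handling is trimmed.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Sketch: attach a VFIO file to a VM using the KVM_DEV_VFIO_FILE naming. */
static int kvm_add_vfio_file(int vm_fd, int vfio_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_VFIO };
	struct kvm_device_attr attr = {
		.group = KVM_DEV_VFIO_FILE,
		.attr = KVM_DEV_VFIO_FILE_ADD,
		.addr = (uint64_t)(uintptr_t)&vfio_fd,	/* points to a 32-bit fd */
	};

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;

	return ioctl(cd.fd, KVM_SET_DEVICE_ATTR, &attr);
}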
enum kvm_device_type {
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 8466c2a9938f..ca30232b7bc8 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -263,6 +263,7 @@ enum nft_chain_attributes {
* @NFTA_RULE_USERDATA: user data (NLA_BINARY, NFT_USERDATA_MAXLEN)
* @NFTA_RULE_ID: uniquely identifies a rule in a transaction (NLA_U32)
* @NFTA_RULE_POSITION_ID: transaction unique identifier of the previous rule (NLA_U32)
+ * @NFTA_RULE_CHAIN_ID: add the rule to chain by ID, alternative to @NFTA_RULE_CHAIN (NLA_U32)
*/
enum nft_rule_attributes {
NFTA_RULE_UNSPEC,
diff --git a/include/uapi/linux/rpmsg.h b/include/uapi/linux/rpmsg.h
index 1637e68177d9..f0c8da2b185b 100644
--- a/include/uapi/linux/rpmsg.h
+++ b/include/uapi/linux/rpmsg.h
@@ -43,4 +43,14 @@ struct rpmsg_endpoint_info {
*/
#define RPMSG_RELEASE_DEV_IOCTL _IOW(0xb5, 0x4, struct rpmsg_endpoint_info)
+/**
+ * Get the flow control state of the remote rpmsg char device.
+ */
+#define RPMSG_GET_OUTGOING_FLOWCONTROL _IOR(0xb5, 0x5, int)
+
+/**
+ * Set the flow control state of the local rpmsg char device.
+ */
+#define RPMSG_SET_INCOMING_FLOWCONTROL _IOR(0xb5, 0x6, int)
+
#endif
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index dc2efd345133..d3994b7716bc 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -49,13 +49,23 @@ enum opal_lock_flags {
OPAL_SAVE_FOR_LOCK = 0x01,
};
+enum opal_key_type {
+ OPAL_INCLUDED = 0, /* key[] is the key */
+ OPAL_KEYRING, /* key is in keyring */
+};
+
struct opal_key {
__u8 lr;
__u8 key_len;
- __u8 __align[6];
+ __u8 key_type;
+ __u8 __align[5];
__u8 key[OPAL_KEY_MAX];
};
+enum opal_revert_lsp_opts {
+ OPAL_PRESERVE = 0x01,
+};
+
struct opal_lr_act {
struct opal_key key;
__u32 sum;
@@ -173,6 +183,17 @@ struct opal_geometry {
__u8 __align[3];
};
+struct opal_discovery {
+ __u64 data;
+ __u64 size;
+};
+
+struct opal_revert_lsp {
+ struct opal_key key;
+ __u32 options;
+ __u32 __pad;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -192,5 +213,7 @@ struct opal_geometry {
#define IOC_OPAL_GET_STATUS _IOR('p', 236, struct opal_status)
#define IOC_OPAL_GET_LR_STATUS _IOW('p', 237, struct opal_lr_status)
#define IOC_OPAL_GET_GEOMETRY _IOR('p', 238, struct opal_geometry)
+#define IOC_OPAL_DISCOVERY _IOW('p', 239, struct opal_discovery)
+#define IOC_OPAL_REVERT_LSP _IOW('p', 240, struct opal_revert_lsp)
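A minimal sketch of the new revert ioctl, assuming a passphrase-derived key carried inline in the structure (key_type OPAL_INCLUDED) and OPAL_PRESERVE to keep user data; it is issued against an open block device fd.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

/* Sketch: revert the locking SP while preserving data. The caller keeps
 * key_len <= OPAL_KEY_MAX. */
static int opal_revert_preserve(int blkdev_fd, const char *key, size_t key_len)
{
	struct opal_revert_lsp rev;

	memset(&rev, 0, sizeof(rev));
	rev.key.key_type = OPAL_INCLUDED;	/* key[] holds the key itself */
	rev.key.key_len = key_len;
	memcpy(rev.key.key, key, key_len);
	rev.options = OPAL_PRESERVE;

	return ioctl(blkdev_fd, IOC_OPAL_REVERT_LSP, &rev);
}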
#endif /* _UAPI_SED_OPAL_H */
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index 281fa286555c..add349889d0a 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -25,6 +25,8 @@
/*
* The type definitions. These are from Ted Ts'o's serial.h
 * For historical reasons, the values from 0 to 13 are defined
 * in include/uapi/linux/serial.h; do not define them here.
*/
#define PORT_NS16550A 14
#define PORT_XSCALE 15
@@ -94,15 +96,9 @@
#define PORT_SCIF 53
#define PORT_IRDA 54
-/* Samsung S3C2410 SoC and derivatives thereof */
-#define PORT_S3C2410 55
-
/* SGI IP22 aka Indy / Challenge S / Indigo 2 */
#define PORT_IP22ZILOG 56
-/* Sharp LH7a40x -- an ARM9 SoC series */
-#define PORT_LH7A40X 57
-
/* PPC CPM type number */
#define PORT_CPM 58
@@ -112,37 +108,23 @@
/* IBM icom */
#define PORT_ICOM 60
-/* Samsung S3C2440 SoC */
-#define PORT_S3C2440 61
-
/* Motorola i.MX SoC */
#define PORT_IMX 62
-/* Marvell MPSC (obsolete unused) */
-#define PORT_MPSC 63
-
/* TXX9 type number */
#define PORT_TXX9 64
-/* Samsung S3C2400 SoC */
-#define PORT_S3C2400 67
-
-/* M32R SIO */
-#define PORT_M32R_SIO 68
-
/*Digi jsm */
#define PORT_JSM 69
/* SUN4V Hypervisor Console */
#define PORT_SUNHV 72
-#define PORT_S3C2412 73
-
/* Xilinx uartlite */
#define PORT_UARTLITE 74
-/* Blackfin bf5xx */
-#define PORT_BFIN 75
+/* Broadcom BCM7271 UART */
+#define PORT_BCM7271 76
/* Broadcom SB1250, etc. SOC */
#define PORT_SB1250_DUART 77
@@ -150,13 +132,6 @@
/* Freescale ColdFire */
#define PORT_MCF 78
-/* Blackfin SPORT */
-#define PORT_BFIN_SPORT 79
-
-/* MN10300 on-chip UART numbers */
-#define PORT_MN10300 80
-#define PORT_MN10300_CTS 81
-
#define PORT_SC26XX 82
/* SH-SCI */
@@ -164,9 +139,6 @@
#define PORT_S3C6400 84
-/* NWPSERIAL, now removed */
-#define PORT_NWPSERIAL 85
-
/* MAX3100 */
#define PORT_MAX3100 86
@@ -225,13 +197,10 @@
/* ST ASC type numbers */
#define PORT_ASC 105
-/* Tilera TILE-Gx UART */
-#define PORT_TILEGX 106
-
/* MEN 16z135 UART */
#define PORT_MEN_Z135 107
-/* SC16IS74xx */
+/* SC16IS7xx */
#define PORT_SC16IS7XX 108
/* MESON */
@@ -243,9 +212,6 @@
/* SPRD SERIAL */
#define PORT_SPRD 111
-/* Cris v10 / v32 SoC */
-#define PORT_CRIS 112
-
/* STM32 USART */
#define PORT_STM32 113
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index 7e42a5b7558b..ff0a931833e2 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -56,7 +56,7 @@ struct sync_fence_info {
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
* @flags: sync_file_info flags
- * @num_fences number of fences in the sync_file
+ * @num_fences: number of fences in the sync_file
* @pad: padding for 64-bit alignment, should always be zero
* @sync_fence_info: pointer to array of struct &sync_fence_info with all
* fences in the sync_file
diff --git a/include/uapi/linux/ublk_cmd.h b/include/uapi/linux/ublk_cmd.h
index 4b8558db90e1..b9cfc5c96268 100644
--- a/include/uapi/linux/ublk_cmd.h
+++ b/include/uapi/linux/ublk_cmd.h
@@ -176,6 +176,12 @@
/* Copy between request and user buffer by pread()/pwrite() */
#define UBLK_F_USER_COPY (1UL << 7)
+/*
+ * User space sets this flag when setting up the device to request zoned
+ * storage support. The kernel may deny the request by returning an error.

+ */
+#define UBLK_F_ZONED (1ULL << 8)
+
/* device state */
#define UBLK_S_DEV_DEAD 0
#define UBLK_S_DEV_LIVE 1
@@ -232,9 +238,27 @@ struct ublksrv_ctrl_dev_info {
#define UBLK_IO_OP_READ 0
#define UBLK_IO_OP_WRITE 1
#define UBLK_IO_OP_FLUSH 2
-#define UBLK_IO_OP_DISCARD 3
-#define UBLK_IO_OP_WRITE_SAME 4
-#define UBLK_IO_OP_WRITE_ZEROES 5
+#define UBLK_IO_OP_DISCARD 3
+#define UBLK_IO_OP_WRITE_SAME 4
+#define UBLK_IO_OP_WRITE_ZEROES 5
+#define UBLK_IO_OP_ZONE_OPEN 10
+#define UBLK_IO_OP_ZONE_CLOSE 11
+#define UBLK_IO_OP_ZONE_FINISH 12
+#define UBLK_IO_OP_ZONE_APPEND 13
+#define UBLK_IO_OP_ZONE_RESET_ALL 14
+#define UBLK_IO_OP_ZONE_RESET 15
+/*
+ * Construct a zone report. The report request is carried in `struct
+ * ublksrv_io_desc`. The `start_sector` field must be the first sector of a zone
+ * and shall indicate the first zone of the report. The `nr_zones` shall
+ * indicate how many zones should be reported at most. The report shall be
+ * delivered as a `struct blk_zone` array. To report fewer zones than requested,
+ * zero the last entry of the returned array.
+ *
+ * Related definitions (blk_zone, blk_zone_cond, blk_zone_type, ...) in
+ * include/uapi/linux/blkzoned.h are part of the ublk UAPI.
+ */
+#define UBLK_IO_OP_REPORT_ZONES 18
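A sketch of the target-side handling described in the comment above: take nr_zones from the descriptor, build a struct blk_zone array, and zero the entry after the last filled zone when reporting fewer zones than asked for (the "zero the last entry" convention). Delivery of the buffer back to the kernel (pread()/pwrite() user copy over /dev/ublkcN) is not shown; blk_zone comes from include/uapi/linux/blkzoned.h.

#include <string.h>
#include <linux/blkzoned.h>
#include <linux/ublk_cmd.h>

/* Sketch: fill a zone report for UBLK_IO_OP_REPORT_ZONES and return the
 * number of zones actually reported. */
static __u32 fill_zone_report(const struct ublksrv_io_desc *iod,
			      struct blk_zone *zones, __u64 zone_sectors,
			      __u64 dev_sectors)
{
	__u32 i, reported = 0;

	for (i = 0; i < iod->nr_zones; i++) {
		__u64 start = iod->start_sector + (__u64)i * zone_sectors;

		if (start >= dev_sectors)
			break;
		zones[i].start = start;
		zones[i].len = zone_sectors;
		zones[i].wp = start;			/* example: empty zone */
		zones[i].type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zones[i].cond = BLK_ZONE_COND_EMPTY;
		zones[i].capacity = zone_sectors;
		reported++;
	}
	if (reported < iod->nr_zones)	/* fewer zones than requested: zero the
					 * entry after the last filled one */
		memset(&zones[reported], 0, sizeof(zones[0]));

	return reported;
}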
#define UBLK_IO_F_FAILFAST_DEV (1U << 8)
#define UBLK_IO_F_FAILFAST_TRANSPORT (1U << 9)
@@ -255,7 +279,10 @@ struct ublksrv_io_desc {
/* op: bit 0-7, flags: bit 8-31 */
__u32 op_flags;
- __u32 nr_sectors;
+ union {
+ __u32 nr_sectors;
+ __u32 nr_zones; /* for UBLK_IO_OP_REPORT_ZONES */
+ };
/* start sector for this io */
__u64 start_sector;
@@ -284,11 +311,21 @@ struct ublksrv_io_cmd {
/* io result, it is valid for COMMIT* command only */
__s32 result;
- /*
- * userspace buffer address in ublksrv daemon process, valid for
- * FETCH* command only
- */
- __u64 addr;
+ union {
+ /*
+ * userspace buffer address in ublksrv daemon process, valid for
+ * FETCH* command only
+ *
+ * `addr` should not be used when UBLK_F_USER_COPY is enabled, because
+ * userspace handles the data copy by pread()/pwrite() over /dev/ublkcN.
+ * With UBLK_F_ZONED (which depends on UBLK_F_USER_COPY), this union is
+ * instead used to pass back the allocated LBA for UBLK_IO_OP_ZONE_APPEND.
+ */
+ __u64 addr;
+ __u64 zone_append_lba;
+ };
};
struct ublk_param_basic {
@@ -331,6 +368,13 @@ struct ublk_param_devt {
__u32 disk_minor;
};
+struct ublk_param_zoned {
+ __u32 max_open_zones;
+ __u32 max_active_zones;
+ __u32 max_zone_append_sectors;
+ __u8 reserved[20];
+};
+
struct ublk_params {
/*
* Total length of parameters, userspace has to set 'len' for both
@@ -342,11 +386,13 @@ struct ublk_params {
#define UBLK_PARAM_TYPE_BASIC (1 << 0)
#define UBLK_PARAM_TYPE_DISCARD (1 << 1)
#define UBLK_PARAM_TYPE_DEVT (1 << 2)
+#define UBLK_PARAM_TYPE_ZONED (1 << 3)
__u32 types; /* types of parameter included */
struct ublk_param_basic basic;
struct ublk_param_discard discard;
struct ublk_param_devt devt;
+ struct ublk_param_zoned zoned;
};
#endif
diff --git a/include/uapi/linux/usb/ch11.h b/include/uapi/linux/usb/ch11.h
index fb0cd24c392c..ce4c83f2e66a 100644
--- a/include/uapi/linux/usb/ch11.h
+++ b/include/uapi/linux/usb/ch11.h
@@ -15,10 +15,8 @@
/* This is arbitrary.
* From USB 2.0 spec Table 11-13, offset 7, a hub can
* have up to 255 ports. The most yet reported is 10.
- *
- * Current Wireless USB host hardware (Intel i1480 for example) allows
- * up to 22 devices to connect. Upcoming hardware might raise that
- * limit. Because the arrays need to add a bit for hub status data, we
+ * Upcoming hardware might raise that limit.
+ * Because the arrays need to add a bit for hub status data, we
* use 31, so plus one evens out to four bytes.
*/
#define USB_MAXCHILDREN 31
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 62d318377379..8a147abfc680 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -3,7 +3,7 @@
* This file holds USB constants and structures that are needed for
* USB device APIs. These are used by the USB device model, which is
* defined in chapter 9 of the USB 2.0 specification and in the
- * Wireless USB 1.0 (spread around). Linux has several APIs in C that
+ * Wireless USB 1.0 spec (now defunct). Linux has several APIs in C that
* need these:
*
* - the master/host side Linux-USB kernel driver API;
@@ -14,9 +14,6 @@
* act either as a USB master/host or as a USB slave/device. That means
* the master and slave side APIs benefit from working well together.
*
- * There's also "Wireless USB", using low power short range radios for
- * peripheral interconnection but otherwise building on the USB framework.
- *
* Note all descriptors are declared '__attribute__((packed))' so that:
*
* [a] they never get padded, either internally (USB spec writers
diff --git a/include/uapi/linux/userfaultfd.h b/include/uapi/linux/userfaultfd.h
index 66dd4cd277bd..62151706c5a3 100644
--- a/include/uapi/linux/userfaultfd.h
+++ b/include/uapi/linux/userfaultfd.h
@@ -39,7 +39,8 @@
UFFD_FEATURE_MINOR_SHMEM | \
UFFD_FEATURE_EXACT_ADDRESS | \
UFFD_FEATURE_WP_HUGETLBFS_SHMEM | \
- UFFD_FEATURE_WP_UNPOPULATED)
+ UFFD_FEATURE_WP_UNPOPULATED | \
+ UFFD_FEATURE_POISON)
#define UFFD_API_IOCTLS \
((__u64)1 << _UFFDIO_REGISTER | \
(__u64)1 << _UFFDIO_UNREGISTER | \
@@ -49,12 +50,14 @@
(__u64)1 << _UFFDIO_COPY | \
(__u64)1 << _UFFDIO_ZEROPAGE | \
(__u64)1 << _UFFDIO_WRITEPROTECT | \
- (__u64)1 << _UFFDIO_CONTINUE)
+ (__u64)1 << _UFFDIO_CONTINUE | \
+ (__u64)1 << _UFFDIO_POISON)
#define UFFD_API_RANGE_IOCTLS_BASIC \
((__u64)1 << _UFFDIO_WAKE | \
(__u64)1 << _UFFDIO_COPY | \
+ (__u64)1 << _UFFDIO_WRITEPROTECT | \
(__u64)1 << _UFFDIO_CONTINUE | \
- (__u64)1 << _UFFDIO_WRITEPROTECT)
+ (__u64)1 << _UFFDIO_POISON)
/*
* Valid ioctl command number range with this API is from 0x00 to
@@ -71,6 +74,7 @@
#define _UFFDIO_ZEROPAGE (0x04)
#define _UFFDIO_WRITEPROTECT (0x06)
#define _UFFDIO_CONTINUE (0x07)
+#define _UFFDIO_POISON (0x08)
#define _UFFDIO_API (0x3F)
/* userfaultfd ioctl ids */
@@ -91,6 +95,8 @@
struct uffdio_writeprotect)
#define UFFDIO_CONTINUE _IOWR(UFFDIO, _UFFDIO_CONTINUE, \
struct uffdio_continue)
+#define UFFDIO_POISON _IOWR(UFFDIO, _UFFDIO_POISON, \
+ struct uffdio_poison)
/* read() structure */
struct uffd_msg {
@@ -225,6 +231,7 @@ struct uffdio_api {
#define UFFD_FEATURE_EXACT_ADDRESS (1<<11)
#define UFFD_FEATURE_WP_HUGETLBFS_SHMEM (1<<12)
#define UFFD_FEATURE_WP_UNPOPULATED (1<<13)
+#define UFFD_FEATURE_POISON (1<<14)
__u64 features;
__u64 ioctls;
@@ -321,6 +328,18 @@ struct uffdio_continue {
__s64 mapped;
};
+struct uffdio_poison {
+ struct uffdio_range range;
+#define UFFDIO_POISON_MODE_DONTWAKE ((__u64)1<<0)
+ __u64 mode;
+
+ /*
+ * Fields below here are written by the ioctl and must be at the end:
+ * the copy_from_user will not read past here.
+ */
+ __s64 updated;
+};
+
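A minimal sketch of marking a faulting range as poisoned. It assumes a userfaultfd that already negotiated UFFD_FEATURE_POISON through UFFDIO_API and has the range registered; a fault handler would issue this instead of UFFDIO_COPY when subsequent accesses should report a memory error.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/userfaultfd.h>

/* Sketch: poison one page of a registered range. */
static int poison_page(int uffd, unsigned long addr, unsigned long page_size)
{
	struct uffdio_poison p;

	memset(&p, 0, sizeof(p));
	p.range.start = addr;
	p.range.len = page_size;
	p.mode = 0;			/* wake the faulting thread */

	if (ioctl(uffd, UFFDIO_POISON, &p) < 0)
		return -1;
	return p.updated == (__s64)page_size ? 0 : -1;	/* bytes handled */
}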
/*
* Flags for the userfaultfd(2) system call itself.
*/
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 20c804bdc09c..afc1369216d9 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -217,6 +217,7 @@ struct vfio_device_info {
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
__u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
#define VFIO_DEVICE_GET_INFO _IO(VFIO_TYPE, VFIO_BASE + 7)
@@ -677,11 +678,60 @@ enum {
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
*
+ * This command is used to query the affected devices in the hot reset for
+ * a given device.
+ *
+ * This command always reports the segment, bus, and devfn information for
+ * each affected device, and reports either the group_id or the devid,
+ * depending on how the calling device was opened.
+ *
+ * - If the calling device is opened via the traditional group/container
+ * API, group_id is reported. The user should check whether it owns all
+ * the affected devices and provide a set of group fds to prove ownership
+ * in the VFIO_DEVICE_PCI_HOT_RESET ioctl.
+ *
+ * - If the calling device is opened as a cdev, devid is reported.
+ * Flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID is set to indicate this
+ * data type. All the affected devices should be represented in
+ * the dev_set, e.g. bound to a vfio driver, and also be owned via
+ * this interface, which is determined by the following conditions:
+ * 1) Has a valid devid within the iommufd_ctx of the calling device.
+ * Ownership cannot be determined across separate iommufd_ctx and
+ * the cdev calling conventions do not support a proof-of-ownership
+ * model as provided in the legacy group interface. In this case
+ * a valid devid with a value greater than zero is provided in the
+ * return structure.
+ * 2) Does not have a valid devid within the iommufd_ctx of the calling
+ * device, but belongs to the same IOMMU group as the calling device
+ * or another opened device that has a valid devid within the
+ * iommufd_ctx of the calling device. This provides implicit ownership
+ * for devices within the same DMA isolation context. In this case
+ * the devid value of VFIO_PCI_DEVID_OWNED is provided in the return
+ * structure.
+ *
+ * A devid value of VFIO_PCI_DEVID_NOT_OWNED is provided in the return
+ * structure for affected devices that are not represented in the dev_set
+ * or whose ownership is not available. Such devices prevent the use of
+ * the VFIO_DEVICE_PCI_HOT_RESET ioctl outside of the proof-of-ownership
+ * calling conventions (i.e. via devices accessed through the legacy group
+ * interface). The flag VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED is set when
+ * all the affected devices are represented in the dev_set and also owned
+ * by the user. This flag is only valid when VFIO_PCI_HOT_RESET_FLAG_DEV_ID
+ * is set, otherwise it is reserved. When set, the user may invoke
+ * VFIO_DEVICE_PCI_HOT_RESET with a zero-length fd array on the calling
+ * device, as ownership is validated by the iommufd_ctx.
+ *
* Return: 0 on success, -errno on failure:
* -enospc = insufficient buffer, -enodev = unsupported for device.
*/
struct vfio_pci_dependent_device {
- __u32 group_id;
+ union {
+ __u32 group_id;
+ __u32 devid;
+#define VFIO_PCI_DEVID_OWNED 0
+#define VFIO_PCI_DEVID_NOT_OWNED -1
+ };
__u16 segment;
__u8 bus;
__u8 devfn; /* Use PCI_SLOT/PCI_FUNC */
@@ -690,6 +740,8 @@ struct vfio_pci_dependent_device {
struct vfio_pci_hot_reset_info {
__u32 argsz;
__u32 flags;
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID (1 << 0)
+#define VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED (1 << 1)
__u32 count;
struct vfio_pci_dependent_device devices[];
};
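A sketch of the usual two-pass sizing pattern for the INFO ioctl, checking the new DEV_ID / DEV_ID_OWNED flags to decide whether a zero-length fd array may later be passed to VFIO_DEVICE_PCI_HOT_RESET. The first call is expected to fail with ENOSPC while still filling @count; the actual reset call is not shown.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Sketch: query hot-reset dependents and report whether the cdev caller
 * owns the whole affected set (returns 1 if so, 0 if not, -1 on error). */
static int hot_reset_set_owned(int device_fd)
{
	struct vfio_pci_hot_reset_info probe = { .argsz = sizeof(probe) };
	struct vfio_pci_hot_reset_info *info;
	size_t sz;
	int owned;

	/* May return -ENOSPC; probe.count is still filled in. */
	ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, &probe);

	sz = sizeof(*info) +
	     probe.count * sizeof(struct vfio_pci_dependent_device);
	info = calloc(1, sz);
	if (!info)
		return -1;
	info->argsz = sz;
	if (ioctl(device_fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info) < 0) {
		free(info);
		return -1;
	}

	owned = (info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID) &&
		(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED);
	free(info);
	return owned;
}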
@@ -700,6 +752,24 @@ struct vfio_pci_hot_reset_info {
* VFIO_DEVICE_PCI_HOT_RESET - _IOW(VFIO_TYPE, VFIO_BASE + 13,
* struct vfio_pci_hot_reset)
*
+ * A PCI hot reset results in either a bus or slot reset which may affect
+ * other devices sharing the bus/slot. The calling user must have
+ * ownership of the full set of affected devices as determined by the
+ * VFIO_DEVICE_GET_PCI_HOT_RESET_INFO ioctl.
+ *
+ * When called on a device file descriptor acquired through the vfio
+ * group interface, the user is required to provide proof of ownership
+ * of those affected devices via the group_fds array in struct
+ * vfio_pci_hot_reset.
+ *
+ * When called on a direct cdev opened vfio device, the flags field of
+ * struct vfio_pci_hot_reset_info reports the ownership status of the
+ * affected devices and this ioctl must be called with an empty group_fds
+ * array. See above INFO ioctl definition for ownership requirements.
+ *
+ * Mixed usage of legacy groups and cdevs across the set of affected
+ * devices is not supported.
+ *
* Return: 0 on success, -errno on failure.
*/
struct vfio_pci_hot_reset {
@@ -829,6 +899,83 @@ struct vfio_device_feature {
#define VFIO_DEVICE_FEATURE _IO(VFIO_TYPE, VFIO_BASE + 17)
/*
+ * VFIO_DEVICE_BIND_IOMMUFD - _IOR(VFIO_TYPE, VFIO_BASE + 18,
+ * struct vfio_device_bind_iommufd)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @iommufd: iommufd to bind.
+ * @out_devid: The device id generated by this bind. devid is a handle for
+ * this device/iommufd bond and can be used in IOMMUFD commands.
+ *
+ * Bind a vfio_device to the specified iommufd.
+ *
+ * User is restricted from accessing the device before the binding operation
+ * is completed. Only allowed on cdev fds.
+ *
+ * Unbind is automatically conducted when device fd is closed.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_bind_iommufd {
+ __u32 argsz;
+ __u32 flags;
+ __s32 iommufd;
+ __u32 out_devid;
+};
+
+#define VFIO_DEVICE_BIND_IOMMUFD _IO(VFIO_TYPE, VFIO_BASE + 18)
+
+/*
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 19,
+ * struct vfio_device_attach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ * @pt_id: Input the target id which can represent an ioas or a hwpt
+ * allocated via iommufd subsystem.
+ * Output the input ioas id or the attached hwpt id which could
+ * be the specified hwpt itself or a hwpt automatically created
+ * for the specified ioas by kernel during the attachment.
+ *
+ * Associate the device with an address space within the bound iommufd.
+ * Undo by VFIO_DEVICE_DETACH_IOMMUFD_PT or device fd close. This is only
+ * allowed on cdev fds.
+ *
+ * If a vfio device is currently attached to a valid hw_pagetable, a second
+ * VFIO_DEVICE_ATTACH_IOMMUFD_PT ioctl passing in another hw_pagetable (hwpt)
+ * id is allowed without an intervening VFIO_DEVICE_DETACH_IOMMUFD_PT. This
+ * action, also known as a hw_pagetable replacement, replaces the device's
+ * currently attached hw_pagetable with a new one corresponding to the given
+ * pt_id.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_attach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+ __u32 pt_id;
+};
+
+#define VFIO_DEVICE_ATTACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 19)
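A condensed sketch of the cdev flow these ioctls enable: bind the device to an iommufd, allocate an IOAS, then attach. The /dev/iommu path, the vfio cdev path passed in by the caller, and IOMMU_IOAS_ALLOC come from the iommufd UAPI and are assumptions here; error handling and unwinding are omitted.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/iommufd.h>
#include <linux/vfio.h>

/* Sketch: cdev-based bind + attach; returns the device fd on success. */
static int cdev_attach(const char *cdev_path, __u32 *dev_id, __u32 *ioas_id)
{
	int devfd = open(cdev_path, O_RDWR);
	int iommufd = open("/dev/iommu", O_RDWR);

	struct vfio_device_bind_iommufd bind = {
		.argsz = sizeof(bind),
		.iommufd = iommufd,
	};
	struct iommu_ioas_alloc alloc = { .size = sizeof(alloc) };
	struct vfio_device_attach_iommufd_pt attach = { .argsz = sizeof(attach) };

	if (ioctl(devfd, VFIO_DEVICE_BIND_IOMMUFD, &bind) < 0 ||
	    ioctl(iommufd, IOMMU_IOAS_ALLOC, &alloc) < 0)
		return -1;

	attach.pt_id = alloc.out_ioas_id;
	if (ioctl(devfd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach) < 0)
		return -1;

	*dev_id = bind.out_devid;
	*ioas_id = alloc.out_ioas_id;
	return devfd;
}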
+
+/*
+ * VFIO_DEVICE_DETACH_IOMMUFD_PT - _IOW(VFIO_TYPE, VFIO_BASE + 20,
+ * struct vfio_device_detach_iommufd_pt)
+ * @argsz: User filled size of this data.
+ * @flags: Must be 0.
+ *
+ * Remove the association between the device and its currently attached
+ * address space. Afterwards, the device is left in a blocking DMA state.
+ * This is only allowed on cdev fds.
+ *
+ * Return: 0 on success, -errno on failure.
+ */
+struct vfio_device_detach_iommufd_pt {
+ __u32 argsz;
+ __u32 flags;
+};
+
+#define VFIO_DEVICE_DETACH_IOMMUFD_PT _IO(VFIO_TYPE, VFIO_BASE + 20)
+
+/*
* Provide support for setting a PCI VF Token, which is used as a shared
* secret between PF and VF drivers. This feature may only be set on a
* PCI SR-IOV PF when SR-IOV is enabled on the PF and there are no existing
@@ -1304,6 +1451,7 @@ struct vfio_iommu_type1_info {
#define VFIO_IOMMU_INFO_CAPS (1 << 1) /* Info supports caps */
__u64 iova_pgsizes; /* Bitmap of supported page sizes */
__u32 cap_offset; /* Offset within info struct of first cap */
+ __u32 pad;
};
/*
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index d3aad12ad1fa..2d827d22cd99 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -181,5 +181,9 @@ struct vhost_vdpa_iova_range {
#define VHOST_BACKEND_F_SUSPEND 0x4
/* Device can be resumed */
#define VHOST_BACKEND_F_RESUME 0x5
+/* Device supports the driver enabling virtqueues both before and after
+ * DRIVER_OK
+ */
+#define VHOST_BACKEND_F_ENABLE_AFTER_DRIVER_OK 0x6
#endif
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 3af6a82d0cad..78260e5d9985 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -796,6 +796,8 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
#define V4L2_PIX_FMT_MM21 v4l2_fourcc('M', 'M', '2', '1') /* Mediatek 8-bit block mode, two non-contiguous planes */
+#define V4L2_PIX_FMT_MT2110T v4l2_fourcc('M', 'T', '2', 'T') /* Mediatek 10-bit block tile mode */
+#define V4L2_PIX_FMT_MT2110R v4l2_fourcc('M', 'T', '2', 'R') /* Mediatek 10-bit block raster mode */
#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
#define V4L2_PIX_FMT_CNF4 v4l2_fourcc('C', 'N', 'F', '4') /* Intel 4-bit packed depth confidence information */
#define V4L2_PIX_FMT_HI240 v4l2_fourcc('H', 'I', '2', '4') /* BTTV 8-bit dithered RGB */
diff --git a/include/uapi/rdma/bnxt_re-abi.h b/include/uapi/rdma/bnxt_re-abi.h
index 8a2a1d4f6b29..6e7c67a0cca3 100644
--- a/include/uapi/rdma/bnxt_re-abi.h
+++ b/include/uapi/rdma/bnxt_re-abi.h
@@ -53,6 +53,7 @@ enum {
BNXT_RE_UCNTX_CMASK_HAVE_CCTX = 0x1ULL,
BNXT_RE_UCNTX_CMASK_HAVE_MODE = 0x02ULL,
BNXT_RE_UCNTX_CMASK_WC_DPI_ENABLED = 0x04ULL,
+ BNXT_RE_UCNTX_CMASK_DBR_PACING_ENABLED = 0x08ULL,
};
enum bnxt_re_wqe_mode {
@@ -131,10 +132,13 @@ enum bnxt_re_shpg_offt {
enum bnxt_re_objects {
BNXT_RE_OBJECT_ALLOC_PAGE = (1U << UVERBS_ID_NS_SHIFT),
+ BNXT_RE_OBJECT_NOTIFY_DRV,
};
enum bnxt_re_alloc_page_type {
BNXT_RE_ALLOC_WC_PAGE = 0,
+ BNXT_RE_ALLOC_DBR_BAR_PAGE,
+ BNXT_RE_ALLOC_DBR_PAGE,
};
enum bnxt_re_var_alloc_page_attrs {
@@ -154,4 +158,7 @@ enum bnxt_re_alloc_page_methods {
BNXT_RE_METHOD_DESTROY_PAGE,
};
+enum bnxt_re_notify_drv_methods {
+ BNXT_RE_METHOD_NOTIFY_DRV = (1U << UVERBS_ID_NS_SHIFT),
+};
#endif /* __BNXT_RE_UVERBS_ABI_H__*/
diff --git a/include/uapi/rdma/irdma-abi.h b/include/uapi/rdma/irdma-abi.h
index a7085e092d34..bb18f15489e3 100644
--- a/include/uapi/rdma/irdma-abi.h
+++ b/include/uapi/rdma/irdma-abi.h
@@ -22,10 +22,16 @@ enum irdma_memreg_type {
IRDMA_MEMREG_TYPE_CQ = 2,
};
+enum {
+ IRDMA_ALLOC_UCTX_USE_RAW_ATTR = 1 << 0,
+ IRDMA_ALLOC_UCTX_MIN_HW_WQ_SIZE = 1 << 1,
+};
+
struct irdma_alloc_ucontext_req {
__u32 rsvd32;
__u8 userspace_ver;
__u8 rsvd8[3];
+ __aligned_u64 comp_mask;
};
struct irdma_alloc_ucontext_resp {
@@ -46,6 +52,9 @@ struct irdma_alloc_ucontext_resp {
__u16 max_hw_sq_chunk;
__u8 hw_rev;
__u8 rsvd2;
+ __aligned_u64 comp_mask;
+ __u16 min_hw_wq_size;
+ __u8 rsvd3[6];
};
struct irdma_alloc_pd_resp {
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index fd3f9e5ee241..03f2beadf201 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -8,6 +8,7 @@
#ifndef SCSI_BSG_UFS_H
#define SCSI_BSG_UFS_H
+#include <asm/byteorder.h>
#include <linux/types.h>
/*
* This file intended to be included by both kernel and user space
@@ -40,11 +41,56 @@ enum ufs_rpmb_op_type {
* @dword_0: UPIU header DW-0
* @dword_1: UPIU header DW-1
* @dword_2: UPIU header DW-2
+ *
+ * @transaction_code: Type of request or response. See also enum
+ * upiu_request_transaction and enum upiu_response_transaction.
+ * @flags: UPIU flags. The meaning of individual flags depends on the
+ * transaction code.
+ * @lun: Logical unit number.
+ * @task_tag: Task tag.
+ * @iid: Initiator ID.
+ * @command_set_type: 0 for SCSI command set; 1 for UFS specific.
+ * @tm_function: Task management function in case of a task management request
+ * UPIU.
+ * @query_function: Query function in case of a query request UPIU.
+ * @response: 0 for success; 1 for failure.
+ * @status: SCSI status if this is the header of a response to a SCSI command.
+ * @ehs_length: EHS length in units of 32 bytes.
+ * @device_information:
+ * @data_segment_length: data segment length.
*/
struct utp_upiu_header {
- __be32 dword_0;
- __be32 dword_1;
- __be32 dword_2;
+ union {
+ struct {
+ __be32 dword_0;
+ __be32 dword_1;
+ __be32 dword_2;
+ };
+ struct {
+ __u8 transaction_code;
+ __u8 flags;
+ __u8 lun;
+ __u8 task_tag;
+#if defined(__BIG_ENDIAN)
+ __u8 iid: 4;
+ __u8 command_set_type: 4;
+#elif defined(__LITTLE_ENDIAN)
+ __u8 command_set_type: 4;
+ __u8 iid: 4;
+#else
+#error
+#endif
+ union {
+ __u8 tm_function;
+ __u8 query_function;
+ } __attribute__((packed));
+ __u8 response;
+ __u8 status;
+ __u8 ehs_length;
+ __u8 device_information;
+ __be16 data_segment_length;
+ };
+ };
};
/**
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index e9ec7e4eb982..453cab2a1209 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -99,7 +99,11 @@
#define SOF_TKN_COMP_OUTPUT_PIN_BINDING_WNAME 414
#define SOF_TKN_COMP_NUM_INPUT_AUDIO_FORMATS 415
#define SOF_TKN_COMP_NUM_OUTPUT_AUDIO_FORMATS 416
-
+/*
+ * The token value is copied to the dapm_widget's
+ * no_wname_in_kcontrol_name.
+ */
+#define SOF_TKN_COMP_NO_WNAME_IN_KCONTROL_NAME 417
/* SSP */
#define SOF_TKN_INTEL_SSP_CLKS_CONTROL 500
diff --git a/include/ufs/ufs.h b/include/ufs/ufs.h
index 198cb391f9db..0cced88f4531 100644
--- a/include/ufs/ufs.h
+++ b/include/ufs/ufs.h
@@ -11,10 +11,16 @@
#ifndef _UFS_H
#define _UFS_H
-#include <linux/mutex.h>
+#include <linux/bitops.h>
#include <linux/types.h>
#include <uapi/scsi/scsi_bsg_ufs.h>
+/*
+ * static_assert() is not allowed in UAPI header files, hence the size of
+ * struct utp_upiu_header is checked in this header file instead.
+ */
+static_assert(sizeof(struct utp_upiu_header) == 12);
+
#define GENERAL_UPIU_REQUEST_SIZE (sizeof(struct utp_upiu_req))
#define QUERY_DESC_MAX_SIZE 255
#define QUERY_DESC_MIN_SIZE 2
@@ -23,9 +29,6 @@
(sizeof(struct utp_upiu_header)))
#define UFS_SENSE_SIZE 18
-#define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
- cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
- (byte1 << 8) | (byte0))
/*
* UFS device may have standard LUs and LUN id could be from 0x00 to
* 0x7F. Standard LUs use "Peripheral Device Addressing Format".
@@ -76,7 +79,7 @@ enum {
};
/* UTP UPIU Transaction Codes Initiator to Target */
-enum {
+enum upiu_request_transaction {
UPIU_TRANSACTION_NOP_OUT = 0x00,
UPIU_TRANSACTION_COMMAND = 0x01,
UPIU_TRANSACTION_DATA_OUT = 0x02,
@@ -85,7 +88,7 @@ enum {
};
/* UTP UPIU Transaction Codes Target to Initiator */
-enum {
+enum upiu_response_transaction {
UPIU_TRANSACTION_NOP_IN = 0x20,
UPIU_TRANSACTION_RESPONSE = 0x21,
UPIU_TRANSACTION_DATA_IN = 0x22,
@@ -102,6 +105,12 @@ enum {
UPIU_CMD_FLAGS_READ = 0x40,
};
+/* UPIU response flags */
+enum {
+ UPIU_RSP_FLAG_UNDERFLOW = 0x20,
+ UPIU_RSP_FLAG_OVERFLOW = 0x40,
+};
+
/* UPIU Task Attributes */
enum {
UPIU_TASK_ATTR_SIMPLE = 0x00,
@@ -466,21 +475,11 @@ enum {
UPIU_COMMAND_SET_TYPE_QUERY = 0x2,
};
-/* UTP Transfer Request Command Offset */
-#define UPIU_COMMAND_TYPE_OFFSET 28
-
/* Offset of the response code in the UPIU header */
#define UPIU_RSP_CODE_OFFSET 8
enum {
- MASK_SCSI_STATUS = 0xFF,
- MASK_TASK_RESPONSE = 0xFF00,
- MASK_RSP_UPIU_RESULT = 0xFFFF,
- MASK_QUERY_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_UPIU_DATA_SEG_LEN = 0xFFFF,
- MASK_RSP_EXCEPTION_EVENT = 0x10000,
MASK_TM_SERVICE_RESP = 0xFF,
- MASK_TM_FUNC = 0xFF,
};
/* Task management service response */
@@ -516,41 +515,6 @@ struct utp_cmd_rsp {
u8 sense_data[UFS_SENSE_SIZE];
};
-struct ufshpb_active_field {
- __be16 active_rgn;
- __be16 active_srgn;
-};
-#define HPB_ACT_FIELD_SIZE 4
-
-/**
- * struct utp_hpb_rsp - Response UPIU structure
- * @residual_transfer_count: Residual transfer count DW-3
- * @reserved1: Reserved double words DW-4 to DW-7
- * @sense_data_len: Sense data length DW-8 U16
- * @desc_type: Descriptor type of sense data
- * @additional_len: Additional length of sense data
- * @hpb_op: HPB operation type
- * @lun: LUN of response UPIU
- * @active_rgn_cnt: Active region count
- * @inactive_rgn_cnt: Inactive region count
- * @hpb_active_field: Recommended to read HPB region and subregion
- * @hpb_inactive_field: To be inactivated HPB region and subregion
- */
-struct utp_hpb_rsp {
- __be32 residual_transfer_count;
- __be32 reserved1[4];
- __be16 sense_data_len;
- u8 desc_type;
- u8 additional_len;
- u8 hpb_op;
- u8 lun;
- u8 active_rgn_cnt;
- u8 inactive_rgn_cnt;
- struct ufshpb_active_field hpb_active_field[2];
- __be16 hpb_inactive_field[2];
-};
-#define UTP_HPB_RSP_SIZE 40
-
/**
* struct utp_upiu_rsp - general upiu response structure
* @header: UPIU header structure DW-0 to DW-2
@@ -561,31 +525,10 @@ struct utp_upiu_rsp {
struct utp_upiu_header header;
union {
struct utp_cmd_rsp sr;
- struct utp_hpb_rsp hr;
struct utp_upiu_query qr;
};
};
-/**
- * struct ufs_query_req - parameters for building a query request
- * @query_func: UPIU header query function
- * @upiu_req: the query request data
- */
-struct ufs_query_req {
- u8 query_func;
- struct utp_upiu_query upiu_req;
-};
-
-/**
- * struct ufs_query_resp - UPIU QUERY
- * @response: device response code
- * @upiu_res: query response data
- */
-struct ufs_query_res {
- u8 response;
- struct utp_upiu_query upiu_res;
-};
-
/*
* VCCQ & VCCQ2 current requirement when UFS device is in sleep state
* and link is in Hibern8 state.
@@ -621,9 +564,6 @@ struct ufs_dev_info {
/* Stores the depth of queue in UFS device */
u8 bqueuedepth;
- /* UFS HPB related flag */
- bool hpb_enabled;
-
/* UFS WB related flags */
bool wb_enabled;
bool wb_buf_flush_enabled;
diff --git a/include/ufs/ufs_quirks.h b/include/ufs/ufs_quirks.h
index bcb4f004bed5..41ff44dfa1db 100644
--- a/include/ufs/ufs_quirks.h
+++ b/include/ufs/ufs_quirks.h
@@ -107,10 +107,4 @@ struct ufs_dev_quirk {
*/
#define UFS_DEVICE_QUIRK_DELAY_AFTER_LPM (1 << 11)
-/*
- * Some UFS devices require L2P entry should be swapped before being sent to the
- * UFS device for HPB READ command.
- */
-#define UFS_DEVICE_QUIRK_SWAP_L2P_ENTRY_FOR_HPB_READ (1 << 12)
-
#endif /* UFS_QUIRKS_H_ */
diff --git a/include/ufs/ufshcd.h b/include/ufs/ufshcd.h
index 6dc11fa0ebb1..7d07b256e906 100644
--- a/include/ufs/ufshcd.h
+++ b/include/ufs/ufshcd.h
@@ -20,6 +20,7 @@
#include <linux/pm_runtime.h>
#include <linux/dma-direction.h>
#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
#include <ufs/unipro.h>
#include <ufs/ufs.h>
#include <ufs/ufs_quirks.h>
@@ -202,6 +203,25 @@ struct ufshcd_lrb {
};
/**
+ * struct ufs_query_req - parameters for building a query request
+ * @query_func: UPIU header query function
+ * @upiu_req: the query request data
+ */
+struct ufs_query_req {
+ u8 query_func;
+ struct utp_upiu_query upiu_req;
+};
+
+/**
+ * struct ufs_query_resp - UPIU QUERY
+ * @response: device response code
+ * @upiu_res: query response data
+ */
+struct ufs_query_res {
+ struct utp_upiu_query upiu_res;
+};
+
+/**
* struct ufs_query - holds relevant data structures for query request
* @request: request upiu and function
* @descriptor: buffer for sending/receiving descriptor
@@ -709,31 +729,6 @@ struct ufs_hba_variant_params {
u32 wb_flush_threshold;
};
-#ifdef CONFIG_SCSI_UFS_HPB
-/**
- * struct ufshpb_dev_info - UFSHPB device related info
- * @num_lu: the number of user logical unit to check whether all lu finished
- * initialization
- * @rgn_size: device reported HPB region size
- * @srgn_size: device reported HPB sub-region size
- * @slave_conf_cnt: counter to check all lu finished initialization
- * @hpb_disabled: flag to check if HPB is disabled
- * @max_hpb_single_cmd: device reported bMAX_DATA_SIZE_FOR_SINGLE_CMD value
- * @is_legacy: flag to check HPB 1.0
- * @control_mode: either host or device
- */
-struct ufshpb_dev_info {
- int num_lu;
- int rgn_size;
- int srgn_size;
- atomic_t slave_conf_cnt;
- bool hpb_disabled;
- u8 max_hpb_single_cmd;
- bool is_legacy;
- u8 control_mode;
-};
-#endif
-
struct ufs_hba_monitor {
unsigned long chunk_size;
@@ -894,7 +889,6 @@ enum ufshcd_mcq_opr {
* @rpm_dev_flush_recheck_work: used to suspend from RPM (runtime power
* management) after the UFS device has finished a WriteBooster buffer
* flush or auto BKOP.
- * @ufshpb_dev: information related to HPB (Host Performance Booster).
* @monitor: statistics about UFS commands
* @crypto_capabilities: Content of crypto capabilities register (0x100)
* @crypto_cap_array: Array of crypto capabilities
@@ -1050,10 +1044,6 @@ struct ufs_hba {
struct request_queue *bsg_queue;
struct delayed_work rpm_dev_flush_recheck_work;
-#ifdef CONFIG_SCSI_UFS_HPB
- struct ufshpb_dev_info ufshpb_dev;
-#endif
-
struct ufs_hba_monitor monitor;
#ifdef CONFIG_SCSI_UFS_CRYPTO
@@ -1254,9 +1244,12 @@ void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);
void ufshcd_update_evt_hist(struct ufs_hba *hba, u32 id, u32 val);
void ufshcd_hba_stop(struct ufs_hba *hba);
void ufshcd_schedule_eh_work(struct ufs_hba *hba);
+void ufshcd_mcq_config_mac(struct ufs_hba *hba, u32 max_active_cmds);
+u32 ufshcd_mcq_read_cqis(struct ufs_hba *hba, int i);
void ufshcd_mcq_write_cqis(struct ufs_hba *hba, u32 val, int i);
unsigned long ufshcd_mcq_poll_cqe_lock(struct ufs_hba *hba,
struct ufs_hw_queue *hwq);
+void ufshcd_mcq_make_queues_operational(struct ufs_hba *hba);
void ufshcd_mcq_enable_esi(struct ufs_hba *hba);
void ufshcd_mcq_config_esi(struct ufs_hba *hba, struct msi_msg *msg);
@@ -1383,12 +1376,6 @@ int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg);
int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);
-int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
- struct utp_upiu_req *req_upiu,
- struct utp_upiu_req *rsp_upiu,
- int msgcode,
- u8 *desc_buff, int *buff_len,
- enum query_opcode desc_op);
int ufshcd_advanced_rpmb_req_handler(struct ufs_hba *hba, struct utp_upiu_req *req_upiu,
struct utp_upiu_req *rsp_upiu, struct ufs_ehs *ehs_req,
struct ufs_ehs *ehs_rsp, int sg_cnt,
@@ -1398,6 +1385,7 @@ int ufshcd_wb_toggle_buf_flush(struct ufs_hba *hba, bool enable);
int ufshcd_suspend_prepare(struct device *dev);
int __ufshcd_suspend_prepare(struct device *dev, bool rpm_ok_for_spm);
void ufshcd_resume_complete(struct device *dev);
+bool ufshcd_is_hba_active(struct ufs_hba *hba);
/* Wrapper functions for safely calling variant operations */
static inline int ufshcd_vops_init(struct ufs_hba *hba)
diff --git a/include/ufs/ufshci.h b/include/ufs/ufshci.h
index 146fbea76d98..d5accacae6bc 100644
--- a/include/ufs/ufshci.h
+++ b/include/ufs/ufshci.h
@@ -11,7 +11,8 @@
#ifndef _UFSHCI_H
#define _UFSHCI_H
-#include <scsi/scsi_host.h>
+#include <linux/types.h>
+#include <ufs/ufs.h>
enum {
TASK_REQ_UPIU_SIZE_DWORDS = 8,
@@ -126,7 +127,6 @@ enum {
};
#define SQ_ICU_ERR_CODE_MASK GENMASK(7, 4)
-#define UPIU_COMMAND_TYPE_MASK GENMASK(31, 28)
#define UFS_MASK(mask, offset) ((mask) << (offset))
/* UFS Version 08h */
@@ -438,15 +438,13 @@ enum {
UTP_SCSI_COMMAND = 0x00000000,
UTP_NATIVE_UFS_COMMAND = 0x10000000,
UTP_DEVICE_MANAGEMENT_FUNCTION = 0x20000000,
- UTP_REQ_DESC_INT_CMD = 0x01000000,
- UTP_REQ_DESC_CRYPTO_ENABLE_CMD = 0x00800000,
};
/* UTP Transfer Request Data Direction (DD) */
-enum {
- UTP_NO_DATA_TRANSFER = 0x00000000,
- UTP_HOST_TO_DEVICE = 0x02000000,
- UTP_DEVICE_TO_HOST = 0x04000000,
+enum utp_data_direction {
+ UTP_NO_DATA_TRANSFER = 0,
+ UTP_HOST_TO_DEVICE = 1,
+ UTP_DEVICE_TO_HOST = 2,
};
/* Overall command status values */
@@ -505,17 +503,38 @@ struct utp_transfer_cmd_desc {
/**
* struct request_desc_header - Descriptor Header common to both UTRD and UTMRD
- * @dword0: Descriptor Header DW0
- * @dword1: Descriptor Header DW1
- * @dword2: Descriptor Header DW2
- * @dword3: Descriptor Header DW3
*/
struct request_desc_header {
- __le32 dword_0;
- __le32 dword_1;
- __le32 dword_2;
- __le32 dword_3;
-};
+ u8 cci;
+ u8 ehs_length;
+#if defined(__BIG_ENDIAN)
+ u8 enable_crypto:1;
+ u8 reserved2:7;
+
+ u8 command_type:4;
+ u8 reserved1:1;
+ u8 data_direction:2;
+ u8 interrupt:1;
+#elif defined(__LITTLE_ENDIAN)
+ u8 reserved2:7;
+ u8 enable_crypto:1;
+
+ u8 interrupt:1;
+ u8 data_direction:2;
+ u8 reserved1:1;
+ u8 command_type:4;
+#else
+#error
+#endif
+
+ __le32 dunl;
+ u8 ocs;
+ u8 cds;
+ __le16 ldbc;
+ __le32 dunu;
+};
+
+static_assert(sizeof(struct request_desc_header) == 16);
/**
* struct utp_transfer_req_desc - UTP Transfer Request Descriptor (UTRD)
diff --git a/include/ufs/unipro.h b/include/ufs/unipro.h
index dc9dd1d23f0f..256eb3a43f54 100644
--- a/include/ufs/unipro.h
+++ b/include/ufs/unipro.h
@@ -230,6 +230,12 @@ enum ufs_hs_gear_tag {
UFS_HS_G5 /* HS Gear 5 */
};
+enum ufs_lanes {
+ UFS_LANE_DONT_CHANGE, /* Don't change Lane */
+ UFS_LANE_1, /* Lane 1 (default for reset) */
+ UFS_LANE_2, /* Lane 2 */
+};
+
enum ufs_unipro_ver {
UFS_UNIPRO_VER_RESERVED = 0,
UFS_UNIPRO_VER_1_40 = 1, /* UniPro version 1.40 */
diff --git a/include/xen/arm/hypervisor.h b/include/xen/arm/hypervisor.h
index 43ef24dd030e..9995695204f5 100644
--- a/include/xen/arm/hypervisor.h
+++ b/include/xen/arm/hypervisor.h
@@ -7,18 +7,6 @@
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
- PARAVIRT_LAZY_NONE,
- PARAVIRT_LAZY_MMU,
- PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
- return PARAVIRT_LAZY_NONE;
-}
-
#ifdef CONFIG_XEN
void __init xen_early_init(void);
#else
diff --git a/include/xen/events.h b/include/xen/events.h
index 95d5e28de324..23932b0673dc 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -105,8 +105,7 @@ int irq_from_virq(unsigned int cpu, unsigned int virq);
evtchn_port_t evtchn_from_irq(unsigned irq);
int xen_set_callback_via(uint64_t via);
-void xen_evtchn_do_upcall(struct pt_regs *regs);
-int xen_hvm_evtchn_do_upcall(void);
+int xen_evtchn_do_upcall(void);
/* Bind a pirq for a physical interrupt to an irq. */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
diff --git a/include/xen/xen.h b/include/xen/xen.h
index f989162983c3..a1e5b3f18d69 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -29,6 +29,12 @@ extern bool xen_pvh;
extern uint32_t xen_start_flags;
+#ifdef CONFIG_XEN_PV
+extern bool xen_pv_pci_possible;
+#else
+#define xen_pv_pci_possible 0
+#endif
+
#include <xen/interface/hvm/start_info.h>
extern struct hvm_start_info pvh_start_info;
void xen_prepare_pvh(void);